diff --git a/.github/workflows/build-and-publish.yml b/.github/workflows/build-and-publish.yml
index 32c90d1..d11601d 100644
--- a/.github/workflows/build-and-publish.yml
+++ b/.github/workflows/build-and-publish.yml
@@ -42,6 +42,7 @@ jobs:
     runs-on: ubuntu-latest
     env:
       BEETLE_GPG_KEY: "${{ secrets.BEETLE_CI_GPG }}"
+      BEETLE_GPG_KEY_ID: "${{ secrets.BEETLE_CI_GPG_KEY_ID }}"
       REDIS_HOST: "${{ secrets.PIO_REDIS_HOST }}"
       REDIS_PORT: "${{ secrets.PIO_REDIS_PORT }}"
       REDIS_AUTH_USERNAME: "${{ secrets.PIO_REDIS_AUTH_USERNAME }}"
@@ -84,25 +85,25 @@ jobs:
      - name: "bundle(firebeetle): copy-bin"
        run: |
-          gpg --trust-model always -e -r 99D22D47AA5053FC -o \
+          gpg --trust-model always -e -r $BEETLE_GPG_KEY_ID -o \
            $DISTRIBUTABLE_DIRECTORY_NAME/firebeetle/firmware.bin.pgp \
            .pio/build/firebeetle/firmware.bin

      - name: "bundle(firebeetle): copy-map"
        run: |
-          gpg --trust-model always -e -r 99D22D47AA5053FC -o \
+          gpg --trust-model always -e -r $BEETLE_GPG_KEY_ID -o \
            $DISTRIBUTABLE_DIRECTORY_NAME/firebeetle/firmware.map.pgp \
            .pio/build/firebeetle/firmware.map

      - name: "bundle(firebeetle): copy-elf"
        run: |
-          gpg --trust-model always -e -r 99D22D47AA5053FC -o \
+          gpg --trust-model always -e -r $BEETLE_GPG_KEY_ID -o \
            $DISTRIBUTABLE_DIRECTORY_NAME/firebeetle/firmware.elf.pgp \
            .pio/build/firebeetle/firmware.elf

      - name: "bundle(firebeetle): copy-partitions"
        run: |
-          gpg --trust-model always -e -r 99D22D47AA5053FC -o \
+          gpg --trust-model always -e -r $BEETLE_GPG_KEY_ID -o \
            $DISTRIBUTABLE_DIRECTORY_NAME/firebeetle/partitions.bin.pgp \
            .pio/build/firebeetle/partitions.bin
@@ -120,25 +121,25 @@ jobs:
      - name: "bundle(xiao): copy-bin"
        run: |
-          gpg --trust-model always -e -r 99D22D47AA5053FC -o \
+          gpg --trust-model always -e -r $BEETLE_GPG_KEY_ID -o \
            $DISTRIBUTABLE_DIRECTORY_NAME/xiao/firmware.bin.pgp \
            .pio/build/xiao/firmware.bin

      - name: "bundle(xiao): copy-map"
        run: |
-          gpg --trust-model always -e -r 99D22D47AA5053FC -o \
+          gpg --trust-model always -e -r $BEETLE_GPG_KEY_ID -o \
            $DISTRIBUTABLE_DIRECTORY_NAME/xiao/firmware.map.pgp \
            .pio/build/xiao/firmware.map

      - name: "bundle(xiao): copy-elf"
        run: |
-          gpg --trust-model always -e -r 99D22D47AA5053FC -o \
+          gpg --trust-model always -e -r $BEETLE_GPG_KEY_ID -o \
            $DISTRIBUTABLE_DIRECTORY_NAME/xiao/firmware.elf.pgp \
            .pio/build/xiao/firmware.elf

      - name: "bundle(xiao): copy-partitions"
        run: |
-          gpg --trust-model always -e -r 99D22D47AA5053FC -o \
+          gpg --trust-model always -e -r $BEETLE_GPG_KEY_ID -o \
            $DISTRIBUTABLE_DIRECTORY_NAME/xiao/partitions.bin.pgp \
            .pio/build/xiao/partitions.bin
@@ -149,7 +150,7 @@ jobs:
         ls -lah $HOME/.platformio/packages/framework-arduinoespressif32
         ls -lah $HOME/.platformio/packages/framework-arduinoespressif32/tools
         ls -lah $HOME/.platformio/packages/framework-arduinoespressif32/tools/partitions
-        gpg --trust-model always -e -r 99D22D47AA5053FC -o \
+        gpg --trust-model always -e -r $BEETLE_GPG_KEY_ID -o \
          $DISTRIBUTABLE_DIRECTORY_NAME/xiao/boot_app0.bin.pgp \
          $HOME/.platformio/packages/framework-arduinoespressif32/tools/partitions/boot_app0.bin
diff --git a/Cargo.lock b/Cargo.lock
index e94a55d..cd4c228 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -100,9 +100,9 @@ dependencies = [

 [[package]]
 name = "aho-corasick"
-version = "1.0.2"
+version = "1.0.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "43f6cb1bf222025340178f382c426f13757b2960e89779dfcb319c32542a5a41"
+checksum = "6748e8def348ed4d14996fa801f4122cd763fff530258cdc03f64b25f89d3a5a"
 dependencies = [
  "memchr",
 ]

@@ -163,9 +163,9 @@ dependencies = [
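# ---------------------------------------------------------------------------
# (editor's note, not part of the diff) The workflow hunks above swap the
# hardcoded recipient 99D22D47AA5053FC for the BEETLE_CI_GPG_KEY_ID secret. A
# minimal sketch of the resulting encryption step, assuming a bash runner and
# that the public key has already been imported from $BEETLE_GPG_KEY (the
# import step is outside this diff):
#
#   gpg --import <<< "$BEETLE_GPG_KEY"
#   gpg --trust-model always -e -r "$BEETLE_GPG_KEY_ID" \
#     -o "$DISTRIBUTABLE_DIRECTORY_NAME/firebeetle/firmware.bin.pgp" \
#     .pio/build/firebeetle/firmware.bin
#
# Quoting "$BEETLE_GPG_KEY_ID" guards against an unset secret expanding to
# nothing and silently shifting arguments; the unquoted form shown in the
# workflow matches its original steps.
# ---------------------------------------------------------------------------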
[[package]] name = "anstyle-wincon" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "180abfa45703aebe0093f79badacc01b8fd4ea2e35118747e5811127f926e188" +checksum = "c677ab05e09154296dd37acecd46420c17b9713e8366facafa8fc0885167cf4c" dependencies = [ "anstyle", "windows-sys 0.48.0", @@ -173,9 +173,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.71" +version = "1.0.75" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c7d0618f0e0b7e8ff11427422b64564d5fb0be1940354bfe2e0529b18a9d9b8" +checksum = "a4668cab20f66d8d020e1fbc0ebe47217433c1b6c8f2040faf858554e394ace6" [[package]] name = "approx" @@ -225,9 +225,9 @@ dependencies = [ [[package]] name = "async-channel" -version = "1.8.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf46fee83e5ccffc220104713af3292ff9bc7c64c7de289f66dae8e38d826833" +checksum = "81953c529336010edd6d8e358f886d9581267795c61b19475b71314bffa46d35" dependencies = [ "concurrent-queue", "event-listener", @@ -311,9 +311,9 @@ dependencies = [ [[package]] name = "async-lock" -version = "2.7.0" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa24f727524730b077666307f2734b4a1a1c57acb79193127dcc8914d5242dd7" +checksum = "287272293e9d8c41773cec55e365490fe034813a2f172f502d6ddcf75b2f582b" dependencies = [ "event-listener", ] @@ -372,7 +372,7 @@ dependencies = [ "log", "memchr", "once_cell", - "pin-project-lite 0.2.10", + "pin-project-lite 0.2.12", "pin-utils", "slab", "wasm-bindgen-futures", @@ -414,13 +414,13 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.71" +version = "0.1.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a564d521dd56509c4c47480d00b80ee55f7e385ae48db5744c67ad50c92d2ebf" +checksum = "bc00ceb34980c03614e35a3a4e218276a0a824e911d07651cd0d858a51e8c0f0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.23", + "syn 2.0.29", ] [[package]] @@ -494,7 +494,7 @@ dependencies = [ "serde_json", "toml", "url", - "uuid 1.4.0", + "uuid 1.4.1", ] [[package]] @@ -524,7 +524,7 @@ dependencies = [ "tide", "toml", "url", - "uuid 1.4.0", + "uuid 1.4.1", ] [[package]] @@ -556,9 +556,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.3.3" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "630be753d4e58660abd17930c71b647fe46c27ea6b63cc59e1e3851406972e42" +checksum = "b4682ae6287fcf752ecaabbfcc7b6f9b72aa33933dc23a554d853aea8eea8635" [[package]] name = "bitvec" @@ -629,9 +629,9 @@ dependencies = [ "serde_bytes", "serde_json", "serde_with", - "time 0.3.22", + "time 0.3.26", "uuid 0.8.2", - "uuid 1.4.0", + "uuid 1.4.1", ] [[package]] @@ -657,7 +657,7 @@ checksum = "fdde5c9cd29ebd706ce1b35600920a33550e402fc998a2e53ad3b42c3c47a192" dependencies = [ "proc-macro2", "quote", - "syn 2.0.23", + "syn 2.0.29", ] [[package]] @@ -694,9 +694,12 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.79" +version = "1.0.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50d30906286121d95be3d479533b458f87493b30a4b5f79a607db8f5d11aa91f" +checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" +dependencies = [ + "libc", +] [[package]] name = "cfg-if" @@ -746,9 +749,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.3.11" +version = "4.3.23" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "1640e5cc7fb47dbb8338fd471b105e7ed6c3cb2aeb00c2e067127ffd3764a05d" +checksum = "03aef18ddf7d879c15ce20f04826ef8418101c7e528014c3eeea13321047dca3" dependencies = [ "clap_builder", "clap_derive", @@ -757,9 +760,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.3.11" +version = "4.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98c59138d527eeaf9b53f35a77fcc1fad9d883116070c63d5de1c7dc7b00c72b" +checksum = "f8ce6fffb678c9b80a70b6b6de0aad31df727623a70fd9a842c30cd573e2fa98" dependencies = [ "anstream", "anstyle", @@ -769,14 +772,14 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.3.2" +version = "4.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8cd2b2a819ad6eec39e8f1d6b53001af1e5469f8c177579cdaeb313115b825f" +checksum = "54a9bb5758fc5dfe728d1019941681eccaf0cf8a4189b692a0ee2f2ecf90a050" dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.23", + "syn 2.0.29", ] [[package]] @@ -1147,9 +1150,9 @@ dependencies = [ [[package]] name = "curl-sys" -version = "0.4.63+curl-8.1.2" +version = "0.4.65+curl-8.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aeb0fef7046022a1e2ad67a004978f0e3cacb9e3123dc62ce768f92197b771dc" +checksum = "961ba061c9ef2fe34bbd12b807152d96f0badd2bebe7b90ce6c8c8b7572a0986" dependencies = [ "cc", "libc", @@ -1219,6 +1222,12 @@ version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c2e66c9d817f1720209181c316d28635c050fa304f9c79e47a520882661b7308" +[[package]] +name = "deranged" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2696e8a945f658fd14dc3b87242e6b80cd0f36ff04ea560fa39082368847946" + [[package]] name = "derivative" version = "2.2.0" @@ -1312,9 +1321,9 @@ dependencies = [ [[package]] name = "either" -version = "1.8.1" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fcaabb2fef8c910e7f4c7ce9f67a1283a1715879a7c230ca9d6d1ae31f16d91" +checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07" [[package]] name = "encase" @@ -1380,24 +1389,24 @@ dependencies = [ [[package]] name = "equivalent" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88bffebc5d80432c9b140ee17875ff173a8ab62faad5b257da912bd2f6c1c0a1" +checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] name = "erased-serde" -version = "0.3.27" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f94c0e13118e7d7533271f754a168ae8400e6a1cc043f2bfd53cc7290f1a1de3" +checksum = "fc978899517288e3ebbd1a3bfc1d9537dbb87eeab149e53ea490e63bcdff561a" dependencies = [ "serde", ] [[package]] name = "errno" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bcfec3a70f97c962c307b2d2c56e358cf1d00b558d74262b5f929ee8cc7e73a" +checksum = "6b30f669a7961ef1631673d2766cc92f52d64f7ef354d4fe0ddfd30ed52f0f4f" dependencies = [ "errno-dragonfly", "libc", @@ -1451,9 +1460,9 @@ dependencies = [ [[package]] name = "exr" -version = "1.6.5" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85a7b44a196573e272e0cf0bcf130281c71e9a0c67062954b3323fd364bfdac9" +checksum = "d1e481eb11a482815d3e9d618db8c42a93207134662873809335a92327440c18" 
dependencies = [ "bit_field", "flume 0.10.14", @@ -1494,9 +1503,9 @@ dependencies = [ [[package]] name = "flate2" -version = "1.0.26" +version = "1.0.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b9429470923de8e8cbd4d2dc513535400b4b3fef0319fb5c4e1f520a7bef743" +checksum = "c6c98ee8095e9d1dcbf2fcc6d95acccb90d1c81db1e44725c6a984b1dbdfb010" dependencies = [ "crc32fast", "miniz_oxide", @@ -1559,7 +1568,7 @@ checksum = "1a5c6c585bc94aaf2c7b51dd4c2ba22680844aba4c687be581871a6f518c5742" dependencies = [ "proc-macro2", "quote", - "syn 2.0.23", + "syn 2.0.29", ] [[package]] @@ -1671,7 +1680,7 @@ dependencies = [ "futures-io", "memchr", "parking", - "pin-project-lite 0.2.10", + "pin-project-lite 0.2.12", "waker-fn", ] @@ -1683,7 +1692,7 @@ checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2", "quote", - "syn 2.0.23", + "syn 2.0.29", ] [[package]] @@ -1711,7 +1720,7 @@ dependencies = [ "futures-sink", "futures-task", "memchr", - "pin-project-lite 0.2.10", + "pin-project-lite 0.2.12", "pin-utils", "slab", ] @@ -1838,9 +1847,9 @@ dependencies = [ [[package]] name = "glow" -version = "0.12.2" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "807edf58b70c0b5b2181dd39fe1839dbdb3ba02645630dc5f753e23da307f762" +checksum = "ca0fe580e4b60a8ab24a868bc08e2f03cbcb20d3d676601fa909386713333728" dependencies = [ "js-sys", "slotmap", @@ -2166,7 +2175,7 @@ dependencies = [ "cookie", "futures-lite", "infer", - "pin-project-lite 0.2.10", + "pin-project-lite 0.2.12", "rand 0.7.3", "serde", "serde_json", @@ -2385,9 +2394,9 @@ dependencies = [ [[package]] name = "image" -version = "0.24.6" +version = "0.24.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "527909aa81e20ac3a44803521443a765550f09b5130c2c2fa1ea59c2f8f50a3a" +checksum = "6f3dfdbdd72063086ff443e297b61695500514b1e41095b6fb9a5ab48a70a711" dependencies = [ "bytemuck", "byteorder", @@ -2494,7 +2503,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" dependencies = [ "hermit-abi", - "rustix 0.38.3", + "rustix 0.38.8", "windows-sys 0.48.0", ] @@ -2532,9 +2541,9 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.8" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62b02a5381cc465bd3041d84623d0fa3b66738b52b8e2fc3bab8ad63ab032f4a" +checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38" [[package]] name = "jni-sys" @@ -2603,7 +2612,8 @@ checksum = "e2db585e1d738fc771bf08a151420d3ed193d9d895a36df7f6f8a9456b911ddc" [[package]] name = "kramer" version = "3.0.0" -source = "git+https://github.com/sizethree/kramer.git?branch=async-read#95b6c3baf6d653272aa3b39fc91cfaa81fd94a29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9421977b9504c3f0f95136f665531bc2effc85971670d7b1fbb4062b1edb5c20" dependencies = [ "async-std", ] @@ -2657,9 +2667,9 @@ dependencies = [ [[package]] name = "libnghttp2-sys" -version = "0.1.7+1.45.0" +version = "0.1.8+1.55.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57ed28aba195b38d5ff02b9170cbff627e336a20925e43b4945390401c5dc93f" +checksum = "4fae956c192dadcdb5dace96db71fa0b827333cce7c7b38dc71446f024d8a340" dependencies = [ "cc", "libc", @@ -2667,9 +2677,9 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.9" +version = "1.1.12" 
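# ---------------------------------------------------------------------------
# (editor's note, not part of the diff) The kramer entry above is the one
# substantive change in this Cargo.lock: the dependency moves from a git
# branch back to the published crates.io release, gaining a checksum in the
# process. It is the lockfile side of deleting the [patch.crates-io] override
# from both Cargo.toml files later in this diff; a change like it is
# typically regenerated with:
#
#   cargo update -p kramer
#
# The surrounding entries are routine version bumps from `cargo update`.
# ---------------------------------------------------------------------------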
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56ee889ecc9568871456d42f603d6a0ce59ff328d291063a45cbdf0036baf6db" +checksum = "d97137b25e321a73eef1418d1d5d2eda4d77e12813f8e6dead84bc52c5870a7b" dependencies = [ "cc", "libc", @@ -2691,9 +2701,9 @@ checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" [[package]] name = "linux-raw-sys" -version = "0.4.3" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09fc20d2ca12cb9f044c93e3bd6d32d523e6e2ec3db4f7b2939cd99026ecd3f0" +checksum = "57bcfdad1b858c2db7c38303a6d2ad4dfaf5eb53dfeb0910128b2c26d6158503" [[package]] name = "lock_api" @@ -2707,9 +2717,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.19" +version = "0.4.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b06a4cde4c0f271a446782e3eff8de789548ce57dbc8eca9292c27f4a42004b4" +checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" dependencies = [ "value-bag", ] @@ -2856,9 +2866,9 @@ dependencies = [ [[package]] name = "mongodb" -version = "2.6.0" +version = "2.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebcd85ec209a5b84fd9f54b9e381f6fa17462bc74160d018fc94fd8b9f61faa8" +checksum = "16928502631c0db72214720aa479c722397fe5aed6bf1c740a3830b3fe4bfcfe" dependencies = [ "async-std", "async-std-resolver", @@ -2898,7 +2908,7 @@ dependencies = [ "trust-dns-proto", "trust-dns-resolver", "typed-builder", - "uuid 1.4.0", + "uuid 1.4.1", "webpki-roots 0.22.6", ] @@ -3060,9 +3070,9 @@ dependencies = [ [[package]] name = "num" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43db66d1170d347f9a065114077f7dccb00c1b9478c89384490a3425279a4606" +checksum = "b05180d69e3da0e530ba2a1dae5110317e49e3b7f3d41be227dc5f92e49ee7af" dependencies = [ "num-bigint 0.4.3", "num-complex", @@ -3096,9 +3106,9 @@ dependencies = [ [[package]] name = "num-complex" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02e0d21255c828d6f128a1e41534206671e8c3ea0c62f32291e808dc82cff17d" +checksum = "1ba157ca0885411de85d6ca030ba7e2a83a28636056c7c699b07c8b6f7383214" dependencies = [ "num-traits", ] @@ -3138,9 +3148,9 @@ dependencies = [ [[package]] name = "num-traits" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd" +checksum = "f30b0abd723be7e2ffca1272140fac1a2f084c77ec3e123c192b66af1ee9e6c2" dependencies = [ "autocfg", ] @@ -3244,9 +3254,9 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-sys" -version = "0.9.90" +version = "0.9.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "374533b0e45f3a7ced10fcaeccca020e66656bc03dac384f852e4e5a7a8104a6" +checksum = "866b5f16f90776b9bb8dc1e1802ac6f0513de3a7a7465867bfbc563dc737faac" dependencies = [ "cc", "libc", @@ -3256,9 +3266,9 @@ dependencies = [ [[package]] name = "ordered-float" -version = "3.7.0" +version = "3.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fc2dbde8f8a79f2102cc474ceb0ad68e3b80b85289ea62389b60e66777e4213" +checksum = "126d3e6f3926bfb0fb24495b4f4da50626f547e54956594748e3d8882a0320b4" dependencies = [ "num-traits", ] @@ -3365,14 +3375,14 @@ dependencies = [ "libc", "redox_syscall 0.3.5", 
"smallvec", - "windows-targets 0.48.1", + "windows-targets 0.48.5", ] [[package]] name = "paste" -version = "1.0.13" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4b27ab7be369122c218afc2079489cdcb4b517c0a3fc386ff11e1fedfcc2b35" +checksum = "de3145af08024dea9fa9914f381a17b8fc6034dfb00f3a84013f7ff43f29ed4c" [[package]] name = "pbkdf2" @@ -3430,7 +3440,7 @@ dependencies = [ "phf_shared", "proc-macro2", "quote", - "syn 2.0.23", + "syn 2.0.29", ] [[package]] @@ -3444,22 +3454,22 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.1.2" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "030ad2bc4db10a8944cb0d837f158bdfec4d4a4873ab701a95046770d11f8842" +checksum = "fda4ed1c6c173e3fc7a83629421152e01d7b1f9b7f65fb301e490e8cfc656422" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.2" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec2e072ecce94ec471b13398d5402c188e76ac03cf74dd1a975161b23a3f6d9c" +checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" dependencies = [ "proc-macro2", "quote", - "syn 2.0.23", + "syn 2.0.29", ] [[package]] @@ -3470,9 +3480,9 @@ checksum = "257b64915a082f7811703966789728173279bdebb956b143dbcd23f6f970a777" [[package]] name = "pin-project-lite" -version = "0.2.10" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c40d25201921e5ff0c862a505c6557ea88568a4e3ace775ab55e93f2f4f9d57" +checksum = "12cc1b0bf1727a77a54b6654e7b5f1af8604923edc8b81885f8ec92f9e3f0a05" [[package]] name = "pin-utils" @@ -3488,9 +3498,9 @@ checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964" [[package]] name = "png" -version = "0.17.9" +version = "0.17.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59871cc5b6cce7eaccca5a802b4173377a1c2ba90654246789a8fa2334426d11" +checksum = "dd75bf2d8dd3702b9707cdbc56a5b9ef42cec752eb8b3bafc01234558442aa64" dependencies = [ "bitflags 1.3.2", "crc32fast", @@ -3511,7 +3521,7 @@ dependencies = [ "concurrent-queue", "libc", "log", - "pin-project-lite 0.2.10", + "pin-project-lite 0.2.12", "windows-sys 0.48.0", ] @@ -3550,18 +3560,18 @@ checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068" [[package]] name = "proc-macro2" -version = "1.0.63" +version = "1.0.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b368fba921b0dce7e60f5e04ec15e565b3303972b42bcfde1d0713b881959eb" +checksum = "18fb31db3f9bddb2ea821cde30a9f70117e3f119938b5ee630b7403aa6e2ead9" dependencies = [ "unicode-ident", ] [[package]] name = "profiling" -version = "1.0.8" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "332cd62e95873ea4f41f3dfd6bbbfc5b52aec892d7e8d534197c4720a0bbbab2" +checksum = "46b2164ebdb1dfeec5e337be164292351e11daf63a05174c6776b2f47460f0c9" [[package]] name = "qoi" @@ -3590,9 +3600,9 @@ checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] name = "quote" -version = "1.0.29" +version = "1.0.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "573015e8ab27661678357f27dc26460738fd2b6c86e46f386fde94cb5d913105" +checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae" dependencies = [ "proc-macro2", ] @@ -3762,9 +3772,9 @@ dependencies = [ [[package]] name = "regex" -version 
= "1.9.0" +version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89089e897c013b3deb627116ae56a6955a72b8bed395c9526af31c9fe528b484" +checksum = "81bc1d4caf89fac26a70747fe603c130093b53c773888797a6329091246d651a" dependencies = [ "aho-corasick", "memchr", @@ -3774,9 +3784,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.3.0" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa250384981ea14565685dea16a9ccc4d1c541a13f82b9c168572264d1df8c56" +checksum = "fed1ceff11a1dddaee50c9dc8e4938bd106e9d89ae372f192311e7da498e3b69" dependencies = [ "aho-corasick", "memchr", @@ -3785,9 +3795,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.7.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ab07dc67230e4a4718e70fd5c20055a4334b121f1f9db8fe63ef39ce9b8c846" +checksum = "e5ea92a5b6195c6ef2a0295ea818b312502c6fc94dde986c5553242e18fd4ce2" [[package]] name = "renderdoc-sys" @@ -3853,7 +3863,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" dependencies = [ - "semver 1.0.17", + "semver 1.0.18", ] [[package]] @@ -3882,14 +3892,14 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.3" +version = "0.38.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac5ffa1efe7548069688cd7028f32591853cd7b5b756d41bcffd2353e4fc75b4" +checksum = "19ed4fa021d81c8392ce04db050a3da9a60299050b7ae1cf482d862b54a7218f" dependencies = [ - "bitflags 2.3.3", + "bitflags 2.4.0", "errno", "libc", - "linux-raw-sys 0.4.3", + "linux-raw-sys 0.4.5", "windows-sys 0.48.0", ] @@ -3939,9 +3949,9 @@ dependencies = [ [[package]] name = "ryu" -version = "1.0.14" +version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe232bdf6be8c8de797b22184ee71118d63780ea42ac85b61d1baa6d3b782ae9" +checksum = "1ad4cc8da4ef723ed60bced201181d83791ad433213d8c24efffda1eec85d741" [[package]] name = "safe_arch" @@ -3954,9 +3964,9 @@ dependencies = [ [[package]] name = "safe_arch" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62a7484307bd40f8f7ccbacccac730108f2cae119a3b11c74485b48aa9ea650f" +checksum = "f398075ce1e6a179b46f51bd88d0598b92b00d3551f1a2d4ac49e771b56ac354" dependencies = [ "bytemuck", ] @@ -3978,9 +3988,9 @@ checksum = "e1cf6437eb19a8f4a6cc0f7dca544973b0b78843adbfeb3683d1a94a0024a294" [[package]] name = "scopeguard" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" +checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" [[package]] name = "sct" @@ -4025,9 +4035,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.17" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bebd363326d05ec3e2f532ab7660680f3b02130d780c299bca73469d521bc0ed" +checksum = "b0293b4b29daaf487284529cc2f5675b8e57c61f70167ba415a463651fd6a918" [[package]] name = "semver-parser" @@ -4037,31 +4047,31 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] name = "serde" -version = "1.0.166" +version = "1.0.171" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d01b7404f9d441d3ad40e6a636a7782c377d2abdbe4fa2440e2edcc2f4f10db8" +checksum = "30e27d1e4fd7659406c492fd6cfaf2066ba8773de45ca75e855590f856dc34a9" dependencies = [ "serde_derive", ] [[package]] name = "serde_bytes" -version = "0.11.11" +version = "0.11.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a16be4fe5320ade08736447e3198294a5ea9a6d44dde6f35f0a5e06859c427a" +checksum = "ab33ec92f677585af6d88c65593ae2375adde54efdbf16d597f2cbc7a6d368ff" dependencies = [ "serde", ] [[package]] name = "serde_derive" -version = "1.0.166" +version = "1.0.171" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dd83d6dde2b6b2d466e14d9d1acce8816dedee94f735eac6395808b3483c6d6" +checksum = "389894603bd18c46fa56231694f8d827779c0951a667087194cf9de94ed24682" dependencies = [ "proc-macro2", "quote", - "syn 2.0.23", + "syn 2.0.29", ] [[package]] @@ -4075,9 +4085,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.100" +version = "1.0.105" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f1e14e89be7aa4c4b78bdbdc9eb5bf8517829a600ae8eaa39a6e1d960b5185c" +checksum = "693151e1ac27563d6dbcec9dee9fbd5da8539b20fa14ad3752b2e6d363ace360" dependencies = [ "indexmap 2.0.0", "itoa", @@ -4213,9 +4223,9 @@ dependencies = [ [[package]] name = "signal-hook" -version = "0.3.15" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "732768f1176d21d09e076c23a93123d40bba92d50c4058da34d45c8de8e682b9" +checksum = "8621587d4798caf8eb44879d42e56b9a93ea5dcd315a6487c357130095b62801" dependencies = [ "libc", "signal-hook-registry", @@ -4245,9 +4255,9 @@ dependencies = [ [[package]] name = "simd-adler32" -version = "0.3.5" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "238abfbb77c1915110ad968465608b68e869e0772622c9656714e73e5a1a522f" +checksum = "d66dc143e6b11c1eddc06d5c423cfc97062865baf299914ab64caa38182078fe" [[package]] name = "simple-mutex" @@ -4465,9 +4475,9 @@ checksum = "9e08d8363704e6c71fc928674353e6b7c23dcea9d82d7012c8faf2a3a025f8d0" [[package]] name = "stringprep" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ee348cb74b87454fff4b551cbf727025810a004f88aeacae7f85b87f4e9a1c1" +checksum = "db3737bde7edce97102e0e2b15365bf7a20bfdb5f60f4f9e8d7004258a51a8da" dependencies = [ "unicode-bidi", "unicode-normalization", @@ -4502,7 +4512,7 @@ dependencies = [ "log", "mime_guess", "once_cell", - "pin-project-lite 0.2.10", + "pin-project-lite 0.2.12", "serde", "serde_json", "web-sys", @@ -4595,9 +4605,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.23" +version = "2.0.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59fb7d6d8281a51045d62b8eb3a7d1ce347b76f312af50cd3dc0af39c87c1737" +checksum = "c324c494eba9d92503e6f1ef2e6df781e78f6a7705a0202d9801b198807d518a" dependencies = [ "proc-macro2", "quote", @@ -4627,22 +4637,22 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.41" +version = "1.0.47" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c16a64ba9387ef3fdae4f9c1a7f07a0997fce91985c0336f1ddc1822b3b37802" +checksum = "97a802ec30afc17eee47b2855fc72e0c4cd62be9b4efe6591edde0ec5bd68d8f" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.41" +version = "1.0.47" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d14928354b01c4d6a4f0e549069adef399a284e7995c7ccca94e8a07a5346c59" +checksum = "6bb623b56e39ab7dcd4b1b98bb6c8f8d907ed255b18de254088016b27a8ee19b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.23", + "syn 2.0.29", ] [[package]] @@ -4660,7 +4670,7 @@ dependencies = [ "http-types", "kv-log-macro", "log", - "pin-project-lite 0.2.10", + "pin-project-lite 0.2.12", "route-recognizer", "serde", "serde_json", @@ -4668,9 +4678,9 @@ dependencies = [ [[package]] name = "tiff" -version = "0.8.1" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7449334f9ff2baf290d55d73983a7d6fa15e01198faef72af07e2a8db851e471" +checksum = "6d172b0f4d3fba17ba89811858b9d3d97f928aece846475bbda076ca46736211" dependencies = [ "flate2", "jpeg-decoder", @@ -4705,14 +4715,15 @@ dependencies = [ [[package]] name = "time" -version = "0.3.22" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea9e1b3cf1243ae005d9e74085d4d542f3125458f3a81af210d901dcd7411efd" +checksum = "a79d09ac6b08c1ab3906a2f7cc2e81a0e27c7ae89c63812df75e52bef0751e07" dependencies = [ + "deranged", "itoa", "serde", "time-core", - "time-macros 0.2.9", + "time-macros 0.2.12", ] [[package]] @@ -4733,9 +4744,9 @@ dependencies = [ [[package]] name = "time-macros" -version = "0.2.9" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "372950940a5f07bf38dbe211d7283c9e6d7327df53794992d293e534c733d09b" +checksum = "75c65469ed6b3a4809d987a41eb1dc918e9bc1d92211cbad7ae82931846f7451" dependencies = [ "time-core", ] @@ -4795,18 +4806,17 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.29.1" +version = "1.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "532826ff75199d5833b9d2c5fe410f29235e25704ee5f0ef599fb51c21f4a4da" +checksum = "17ed6077ed6cd6c74735e21f37eb16dc3935f96878b1fe961074089cc80893f9" dependencies = [ - "autocfg", "backtrace", "bytes 1.4.0", "libc", "mio", "num_cpus", - "pin-project-lite 0.2.10", - "socket2 0.4.9", + "pin-project-lite 0.2.12", + "socket2 0.5.3", "tokio-macros", "windows-sys 0.48.0", ] @@ -4819,7 +4829,7 @@ checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.23", + "syn 2.0.29", ] [[package]] @@ -4843,7 +4853,7 @@ dependencies = [ "futures-core", "futures-io", "futures-sink", - "pin-project-lite 0.2.10", + "pin-project-lite 0.2.12", "tokio", ] @@ -4864,9 +4874,9 @@ checksum = "7cda73e2f1397b1262d6dfdcef8aafae14d1de7748d66822d3bfeeb6d03e5e4b" [[package]] name = "toml_edit" -version = "0.19.12" +version = "0.19.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c500344a19072298cd05a7224b3c0c629348b78692bf48466c5238656e315a78" +checksum = "f8123f27e969974a3dfba720fdb560be359f57b44302d280ba72e76a74480e8a" dependencies = [ "indexmap 2.0.0", "toml_datetime", @@ -4881,7 +4891,7 @@ checksum = "8ce8c33a8d48bd45d624a6e523445fd21ec13d3653cd51f681abf67418f54eb8" dependencies = [ "cfg-if", "log", - "pin-project-lite 0.2.10", + "pin-project-lite 0.2.12", "tracing-attributes", "tracing-core", ] @@ -4894,7 +4904,7 @@ checksum = "5f4f31f56159e98206da9efd823404b79b6ef3143b4a7ab76e67b1751b25a4ab" dependencies = [ "proc-macro2", "quote", - "syn 2.0.23", + "syn 2.0.29", ] [[package]] @@ -5018,9 +5028,9 @@ checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" [[package]] name = "unicode-ident" 
-version = "1.0.10" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22049a19f4a68748a168c0fc439f9516686aa045927ff767eca0a85101fb6e73" +checksum = "301abaae475aa91687eb82514b328ab47a211a533026cb25fc3e519b86adfc3c" [[package]] name = "unicode-normalization" @@ -5095,9 +5105,9 @@ dependencies = [ [[package]] name = "uuid" -version = "1.4.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d023da39d1fde5a8a3fe1f3e01ca9632ada0a63e9797de55a879d6e2236277be" +checksum = "79daa5ed5740825c40b389c5e50312b9c86df53fccd33f281df655642b43869d" dependencies = [ "getrandom 0.2.10", "serde", @@ -5202,7 +5212,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.23", + "syn 2.0.29", "wasm-bindgen-shared", ] @@ -5236,7 +5246,7 @@ checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.23", + "syn 2.0.29", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -5414,7 +5424,7 @@ dependencies = [ "js-sys", "log", "naga", - "parking_lot 0.11.2", + "parking_lot 0.12.1", "profiling", "raw-window-handle 0.5.2", "smallvec", @@ -5440,7 +5450,7 @@ dependencies = [ "fxhash", "log", "naga", - "parking_lot 0.11.2", + "parking_lot 0.12.1", "profiling", "raw-window-handle 0.5.2", "smallvec", @@ -5466,7 +5476,7 @@ dependencies = [ "d3d12", "foreign-types 0.3.2", "fxhash", - "glow 0.12.2", + "glow 0.12.3", "gpu-alloc", "gpu-allocator", "gpu-descriptor", @@ -5479,7 +5489,7 @@ dependencies = [ "metal", "naga", "objc", - "parking_lot 0.11.2", + "parking_lot 0.12.1", "profiling", "range-alloc", "raw-window-handle 0.5.2", @@ -5517,12 +5527,12 @@ dependencies = [ [[package]] name = "wide" -version = "0.7.10" +version = "0.7.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40018623e2dba2602a9790faba8d33f2ebdebf4b86561b83928db735f8784728" +checksum = "aa469ffa65ef7e0ba0f164183697b89b854253fd31aeb92358b7b6155177d62f" dependencies = [ "bytemuck", - "safe_arch 0.7.0", + "safe_arch 0.7.1", ] [[package]] @@ -5606,7 +5616,7 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e686886bc078bc1b0b600cac0147aadb815089b6e4da64016cbd754b6342700f" dependencies = [ - "windows-targets 0.48.1", + "windows-targets 0.48.5", ] [[package]] @@ -5628,7 +5638,7 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" dependencies = [ - "windows-targets 0.48.1", + "windows-targets 0.48.5", ] [[package]] @@ -5648,17 +5658,17 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.48.1" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05d4b17490f70499f20b9e791dcf6a299785ce8af4d709018206dc5b4953e95f" +checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" dependencies = [ - "windows_aarch64_gnullvm 0.48.0", - "windows_aarch64_msvc 0.48.0", - "windows_i686_gnu 0.48.0", - "windows_i686_msvc 0.48.0", - "windows_x86_64_gnu 0.48.0", - "windows_x86_64_gnullvm 0.48.0", - "windows_x86_64_msvc 0.48.0", + "windows_aarch64_gnullvm 0.48.5", + "windows_aarch64_msvc 0.48.5", + "windows_i686_gnu 0.48.5", + "windows_i686_msvc 0.48.5", + "windows_x86_64_gnu 0.48.5", + "windows_x86_64_gnullvm 0.48.5", + "windows_x86_64_msvc 0.48.5", ] [[package]] @@ -5669,9 +5679,9 @@ checksum = 
"597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" [[package]] name = "windows_aarch64_gnullvm" -version = "0.48.0" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc" +checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] name = "windows_aarch64_msvc" @@ -5687,9 +5697,9 @@ checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" [[package]] name = "windows_aarch64_msvc" -version = "0.48.0" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3" +checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] name = "windows_i686_gnu" @@ -5705,9 +5715,9 @@ checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" [[package]] name = "windows_i686_gnu" -version = "0.48.0" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241" +checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] name = "windows_i686_msvc" @@ -5723,9 +5733,9 @@ checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" [[package]] name = "windows_i686_msvc" -version = "0.48.0" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00" +checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] name = "windows_x86_64_gnu" @@ -5741,9 +5751,9 @@ checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" [[package]] name = "windows_x86_64_gnu" -version = "0.48.0" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1" +checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] name = "windows_x86_64_gnullvm" @@ -5753,9 +5763,9 @@ checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" [[package]] name = "windows_x86_64_gnullvm" -version = "0.48.0" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953" +checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] name = "windows_x86_64_msvc" @@ -5771,9 +5781,9 @@ checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" [[package]] name = "windows_x86_64_msvc" -version = "0.48.0" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" +checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "winit" @@ -5810,9 +5820,9 @@ dependencies = [ [[package]] name = "winnow" -version = "0.4.8" +version = "0.5.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9482fe6ceabdf32f3966bfdd350ba69256a97c30253dc616fe0005af24f164e" +checksum = "d09770118a7eb1ccaf4a594a221334119a44a814fcb0d31c5b85e83e97227a97" dependencies = [ "memchr", ] @@ -5885,9 +5895,9 @@ checksum = "a67300977d3dc3f8034dae89778f502b6ba20b269527b3223ba59c0cf393bb8a" [[package]] name = "xml-rs" 
-version = "0.8.15"
+version = "0.8.16"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5a56c84a8ccd4258aed21c92f70c0f6dea75356b6892ae27c24139da456f9336"
+checksum = "47430998a7b5d499ccee752b41567bc3afc57e1327dc855b1a2aa44ce29b5fa1"

 [[package]]
 name = "zune-inflate"
diff --git a/Cargo.toml b/Cargo.toml
index e8a21e1..6cdb906 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -4,6 +4,3 @@ members = [
   "tools/beetle-mock",
   "tools/wchgen"
 ]
-
-[patch.crates-io]
-kramer = { git = "https://github.com/sizethree/kramer.git", branch = "async-read" }
diff --git a/demo-layout.json b/src/beetle-srv/.example-layouts/demo-layout.json
similarity index 100%
rename from demo-layout.json
rename to src/beetle-srv/.example-layouts/demo-layout.json
diff --git a/event-layout.json b/src/beetle-srv/.example-layouts/event-layout.json
similarity index 100%
rename from event-layout.json
rename to src/beetle-srv/.example-layouts/event-layout.json
diff --git a/src/beetle-srv/Cargo.toml b/src/beetle-srv/Cargo.toml
index fd20081..7d46035 100644
--- a/src/beetle-srv/Cargo.toml
+++ b/src/beetle-srv/Cargo.toml
@@ -50,6 +50,3 @@ url = { version = "^2.3.1" }
 # Enabling this feature will allow developers to use a "naked" tcp stream for redis connections, instead of
 # the default, which is to use `async_tls`.
 redis-insecure = []
-
-[patch.crates-io]
-kramer = { git = "https://github.com/sizethree/kramer.git", branch = "async-read" }
diff --git a/src/beetle-srv/env.example.toml b/src/beetle-srv/env.example.toml
index 4f38af6..fa57d87 100644
--- a/src/beetle-srv/env.example.toml
+++ b/src/beetle-srv/env.example.toml
@@ -3,7 +3,7 @@ host = "0.0.0.0"
 port = 6379

 [mongo]
-url = ""
+url = "mongodb+srv://..."
 database = ""

 [mongo.collections]
@@ -11,6 +11,9 @@ users = ""
 device_diagnostics = ""
 device_authorities = ""
 device_schedules = ""
+device_histories = ""
+device_states = ""
+migrations = ""

 [registrar]
 id_consumer_username = ""
@@ -18,8 +21,14 @@ id_consumer_password = ""
 registration_pool_minimum = 1
 vendor_api_secret = ""
 acl_user_allowlist = ["default"]
-active_device_chunk_size = 10
 initial_scannable_addr = ""
+interval_delay_ms = 500
+active_device_chunk_size = 10
+device_schedule_refresh_interval_seconds = 15
+
+[registrar.analytics_configuration]
+kind = ""
+content = { api_key = "", account_id = "" }

 [web]
 cookie_domain = ""
@@ -27,16 +36,13 @@ session_secret = ""
 session_cookie = ""
 ui_redirect = ""

-[auth0]
-client_id=""
-client_secret=""
-redirect_uri=""
-auth_uri=""
-token_uri=""
-info_uri=""
-
 [google]
 client_id=""
 client_secret=""
 redirect_uri=""
-scopes=[]
+scopes=[
+  "openid",
+  "https://www.googleapis.com/auth/calendar.readonly",
+  "https://www.googleapis.com/auth/userinfo.profile",
+  "https://www.googleapis.com/auth/calendar.events.readonly"
+]
diff --git a/src/beetle-srv/src/api/auth/google.rs b/src/beetle-srv/src/api/auth/google.rs
index a79d1bb..093cdb8 100644
--- a/src/beetle-srv/src/api/auth/google.rs
+++ b/src/beetle-srv/src/api/auth/google.rs
@@ -1,7 +1,7 @@
 //! This module is currently in the process of replacing the Auth0-based module defined in the
 //! parent directory. Some of the code in here is repetitive while that is being phased out.

-use crate::schema;
+use crate::{registrar, schema};
 use anyhow::Context;

 /// This value is how auth0 "tags" ids during its oauth handshake. It will be added for all users
@@ -29,7 +29,7 @@ pub async fn redirect(request: tide::Request) -> tide::Result
 pub async fn complete(request: tide::Request) -> tide::Result {
   let query = request.query::()?;
   let worker = request.state();
-  log::trace!("have code - '{}'", query.code);
+  log::debug!("have code - '{}'", query.code);

   let mut response = surf::post("https://oauth2.googleapis.com/token")
     .body_json(&crate::vendor::google::TokenRequest {
@@ -55,7 +55,11 @@ pub async fn complete(request: tide::Request) -> tide::Result
       created: chrono::Utc::now(),
       token: parsed,
     };
-    let userinfo = crate::vendor::google::fetch_user(&handle).await?;
+
+    let userinfo = crate::vendor::google::fetch_user(&handle).await.map_err(|error| {
+      log::error!("unable to fetch user info during oauth completion - {error}");
+      error
+    })?;

     let normalized_id = format!("{GOOGLE_ID_PREFIX}{}", userinfo.id);
     let query = bson::doc! { "oid": &normalized_id };
@@ -101,7 +105,10 @@ pub async fn complete(request: tide::Request) -> tide::Result
   let jwt = crate::api::claims::Claims::for_user(&user.oid).encode(&worker.web_configuration.session_secret)?;

   if let Err(error) = worker
-    .queue_job(crate::registrar::RegistrarJob::access_token_refresh(handle, user.oid))
+    .queue_job_kind(registrar::RegistrarJobKind::UserAccessTokenRefresh {
+      handle,
+      user_id: user.oid,
+    })
     .await
   {
     log::warn!("unable to queue refresh token request - {error}");
diff --git a/src/beetle-srv/src/api/devices.rs b/src/beetle-srv/src/api/devices.rs
index 95a0b61..1fcbb23 100644
--- a/src/beetle-srv/src/api/devices.rs
+++ b/src/beetle-srv/src/api/devices.rs
@@ -1,4 +1,4 @@
-use crate::schema;
+use crate::{registrar, schema};
 use serde::{Deserialize, Serialize};

 /// The payload for looking up a device by id.
@@ -148,7 +148,7 @@ pub async fn unregister(mut request: tide::Request) -> tide::Result
   let worker = request.state();
   let users = worker.users_collection()?;

-  let mut user = worker.request_authority(&request).await?.ok_or_else(|| {
+  let user = worker.request_authority(&request).await?.ok_or_else(|| {
     log::warn!("device unregister -> no user found");
     tide::Error::from_str(404, "missing-user")
   })?;
@@ -158,50 +158,52 @@ pub async fn unregister(mut request: tide::Request) -> tide::Result
     tide::Error::from_str(422, "bad-payload")
   })?;

-  match user.devices.take() {
-    Some(mut device_map) => {
-      log::trace!("device unregister -> found device map - {device_map:?}");
-
-      if device_map.remove(&payload.device_id).is_none() {
-        return Ok(tide::Response::builder(422).build());
-      }
-
-      // Update our user handle
-      let oid = user.oid.clone();
-      let query = bson::doc! { "oid": &oid };
-      let updated = schema::User {
-        devices: Some(device_map),
-        ..user
-      };
-      let options = mongodb::options::FindOneAndUpdateOptions::builder()
-        .upsert(true)
-        .return_document(mongodb::options::ReturnDocument::After)
-        .build();
-
-      // Persist update into mongo
-      users
-        .find_one_and_update(
-          query,
-          bson::doc! { "$set": bson::to_bson(&updated).map_err(|error| {
-            log::warn!("unable to serialize user update - {error}");
-            tide::Error::from_str(500, "user-failure")
-          })? },
-          options,
-        )
-        .await
-        .map_err(|error| {
-          log::warn!("unable to create new user - {:?}", error);
-          tide::Error::from_str(500, "user-failure")
-        })?;
-
-      log::info!("user '{}' unregistered device '{}'", oid, payload.device_id);
-      Ok(tide::Response::builder(200).build())
-    }
-    None => {
-      log::warn!("user has no devices, not found");
-      Ok(tide::Response::builder(422).build())
-    }
+  let mut device_map = user.devices.ok_or_else(|| {
+    log::warn!("user '{}' had no device map", user.oid);
+    tide::Error::from_str(404, "missing-user")
+  })?;
+
+  log::trace!("device unregister -> found device map - {device_map:?}");
+
+  if device_map.remove(&payload.device_id).is_none() {
+    log::warn!(
+      "was unable to remove device '{}' from user '{}'",
+      payload.device_id,
+      user.oid
+    );
+    return Ok(tide::Response::builder(422).build());
   }
+
+  // Update our user handle
+  let oid = user.oid.clone();
+  let query = bson::doc! { "oid": &oid };
+  let updated = schema::User {
+    devices: Some(device_map),
+    ..user
+  };
+  let options = mongodb::options::FindOneAndUpdateOptions::builder()
+    .upsert(true)
+    .return_document(mongodb::options::ReturnDocument::After)
+    .build();
+
+  // Persist update into mongo
+  users
+    .find_one_and_update(
+      query,
+      bson::doc! { "$set": bson::to_bson(&updated).map_err(|error| {
+        log::warn!("unable to serialize user update - {error}");
+        tide::Error::from_str(500, "user-failure")
+      })? },
+      options,
+    )
+    .await
+    .map_err(|error| {
+      log::warn!("unable to create new user - {:?}", error);
+      tide::Error::from_str(500, "user-failure")
+    })?;
+
+  log::info!("user '{}' unregistered device '{}'", oid, payload.device_id);
+  Ok(tide::Response::builder(200).build())
 }

 /// Route: register
@@ -222,10 +224,13 @@ pub async fn register(mut request: tide::Request) -> tide::Result
   })?;

   let job_id = worker
-    .queue_job(crate::registrar::RegistrarJob::device_ownership(
-      &user.oid,
-      &payload.device_id,
+    .queue_job_kind(registrar::RegistrarJobKind::Ownership(
+      registrar::ownership::DeviceOwnershipRequest {
+        device_id: payload.device_id.clone(),
+        user_id: user.oid.clone(),
+      },
     ))
+    // registrar::RegistrarJob::device_ownership(&user.oid, &payload.device_id))
     .await?;

   log::info!(
diff --git a/src/beetle-srv/src/api/jobs.rs b/src/beetle-srv/src/api/jobs.rs
index 7f970b5..7a3848c 100644
--- a/src/beetle-srv/src/api/jobs.rs
+++ b/src/beetle-srv/src/api/jobs.rs
@@ -94,13 +94,14 @@ pub async fn queue(mut request: tide::Request) -> tide::Result
     return Err(tide::Error::from_str(400, "not-found"));
   }

+  let device_id = queue_payload.device_id.clone();
+
   log::info!(
-    "user '{}' creating message for device - {:?}",
+    "user '{}' creating message for device '{device_id}' - {:?}",
     user.oid,
     queue_payload.kind
   );

-  let device_id = queue_payload.device_id.clone();
   let layout = match queue_payload.kind {
     kind @ QueuePayloadKind::MakePublic | kind @ QueuePayloadKind::MakePrivate => {
       let privacy = match kind {
@@ -109,8 +110,10 @@
         _ => return Ok(tide::Error::from_str(422, "bad transition").into()),
       };

-      let job = registrar::RegistrarJob::set_public_availability(device_id, privacy);
-      let id = worker.queue_job(job).await?;
+      let job = registrar::RegistrarJobKind::OwnershipChange(
+        registrar::ownership::DeviceOwnershipChangeRequest::SetPublicAvailability(device_id, privacy),
+      );
+      let id = worker.queue_job_kind(job).await?;

       return tide::Body::from_json(&QueueResponse { id }).map(|body| tide::Response::builder(200).body(body).build());
     }
@@ -130,6 +133,7 @@ pub async fn queue(mut request: tide::Request) -> tide::Result
 }

     QueuePayloadKind::Refresh => {
+      log::debug!("refreshing device state for '{device_id}'");
       let job = registrar::RegistrarJobKind::Renders(registrar::jobs::RegistrarRenderKinds::CurrentDeviceState(device_id));
       let id = worker.queue_job_kind(job).await?;

@@ -138,8 +142,9 @@
     // Attempt to queue the large, scannable QR code for registering this device.
     QueuePayloadKind::Registration => {
-      let job = registrar::RegistrarJob::registration_scannable(device_id);
-      let id = worker.queue_job(job).await?;
+      let job =
+        registrar::RegistrarJobKind::Renders(registrar::jobs::RegistrarRenderKinds::RegistrationScannable(device_id));
+      let id = worker.queue_job_kind(job).await?;

       return tide::Body::from_json(&QueueResponse { id }).map(|body| tide::Response::builder(200).body(body).build());
     }
@@ -158,8 +163,12 @@
       return tide::Body::from_json(&QueueResponse { id }).map(|body| tide::Response::builder(200).body(body).build());
     }
     QueuePayloadKind::Rename(new_name) => {
-      let job = registrar::RegistrarJob::rename_device(device_id, new_name.clone());
-      let id = worker.queue_job(job).await?;
+      let id = worker
+        .queue_job_kind(registrar::RegistrarJobKind::Rename(registrar::DeviceRenameRequest {
+          device_id,
+          new_name,
+        }))
+        .await?;

       return tide::Body::from_json(&QueueResponse { id }).map(|body| tide::Response::builder(200).body(body).build());
     }
@@ -169,7 +178,7 @@
       let origin = user
         .nickname
         .or(user.name)
-        .map(schema::DeviceStateMessageOrigin::User)
+        .map(|name| schema::DeviceStateMessageOrigin::User { nickname: name })
         .unwrap_or_else(|| schema::DeviceStateMessageOrigin::Unknown);

       let id = worker
@@ -191,6 +200,8 @@
     QueuePayloadKind::Link(scannable_link) => crate::rendering::RenderVariant::scannable(scannable_link),
   };

+  log::debug!("immediately requesting render for '{device_id}' from api");
+
   let request_id = worker
     .queue_render(&device_id, &user.oid, layout)
     .await
diff --git a/src/beetle-srv/src/api/mod.rs b/src/beetle-srv/src/api/mod.rs
index 5c3e2f5..79cf889 100644
--- a/src/beetle-srv/src/api/mod.rs
+++ b/src/beetle-srv/src/api/mod.rs
@@ -78,16 +78,11 @@ impl Default for HeartbeatPayload {

 /// An api route to verify uptime/availability.
 async fn heartbeat(_request: tide::Request) -> tide::Result {
-  Ok(
-    tide::Response::builder(200)
-      .body(tide::Body::from_json(&HeartbeatPayload::default())?)
-      .build(),
-  )
+  tide::Body::from_json(&HeartbeatPayload::default()).map(|body| tide::Response::builder(200).body(body).build())
 }

 /// The 404 handler.
 async fn missing(_request: tide::Request) -> tide::Result {
-  log::debug!("not-found");
   Ok(tide::Response::builder(404).build())
 }

diff --git a/src/beetle-srv/src/api/worker.rs b/src/beetle-srv/src/api/worker.rs
index 8a1753d..69d284a 100644
--- a/src/beetle-srv/src/api/worker.rs
+++ b/src/beetle-srv/src/api/worker.rs
@@ -60,7 +60,7 @@ impl Worker {

   /// Will attempt to queue various registrar jobs by serializing them and pushing the job onto our
   /// job queue redis list. During this process we will encrypt the actual job.
-  pub(super) async fn queue_job(&self, job: crate::registrar::RegistrarJob) -> Result {
+  async fn queue_job(&self, job: crate::registrar::RegistrarJob) -> Result {
     // TODO: this is where id generation should happen, not in the job construction itself.
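    // -----------------------------------------------------------------------
    // (editor's aside, not part of the diff) The TODO above suggests minting
    // the id at queue time instead of at RegistrarJob construction. A minimal,
    // hypothetical sketch of that, assuming the `uuid` crate (already present
    // in this workspace's Cargo.lock) with its v4 feature enabled:
    //
    //   fn generate_queue_id() -> String {
    //     uuid::Uuid::new_v4().to_string()
    //   }
    //
    // `queue_job` would then open with `let id = generate_queue_id();` rather
    // than cloning the id the job was constructed with, as the next line does.
    // -----------------------------------------------------------------------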
    let id = job.id.clone();
@@ -109,7 +109,8 @@ impl Worker {
     let mut redis_connection = self.get_redis_lock().await?;

     if let Some(ref mut connection) = *redis_connection {
-      let mut queue = crate::rendering::queue::Queue::new(connection);
+      let mut queue =
+        crate::rendering::queue::Queue::new(connection, &self.registrar_configuration.vendor_api_secret);

       let result = queue
         .queue(
diff --git a/src/beetle-srv/src/bin/beetle-cli.rs b/src/beetle-srv/src/bin/beetle-cli.rs
index 06f3861..ab813a3 100644
--- a/src/beetle-srv/src/bin/beetle-cli.rs
+++ b/src/beetle-srv/src/bin/beetle-cli.rs
@@ -52,6 +52,13 @@ enum CommandLineCommand {

   /// Prints the length of a device message queue.
   PrintItems(cli::SingleDeviceCommand),
+
+  /// Do migration things.
+  Migrate {
+    /// The operation
+    #[clap(value_enum)]
+    kind: cli::migrate::MigrateOp,
+  },
 }

 /// The command line options themselves.
@@ -74,6 +81,7 @@ async fn run(config: cli::CommandLineConfig, command: CommandLineCommand) -> io::Result
   println!("  mongodb: {}", config.mongo.url);
   println!("==");
   match command {
+    CommandLineCommand::Migrate { kind } => cli::migrate::run(&config, kind).await,
     CommandLineCommand::DropCollections => {
       let mongo = beetle::mongo::connect_mongo(&config.mongo).await?;
       mongo
@@ -108,7 +116,7 @@ async fn run(config: cli::CommandLineConfig, command: CommandLineCommand) -> io::Result
       CommandLineCommand::Lighten(inner) => (&inner.id, beetle::rendering::RenderVariant::on()),
       _ => unreachable!(),
     };
-    let mut queue = beetle::rendering::Queue::new(&mut stream);
+    let mut queue = beetle::rendering::Queue::new(&mut stream, &config.registrar.vendor_api_secret);
     let (request_id, pending) = queue
       .queue::<&str, &str>(id, &beetle::rendering::QueuedRenderAuthority::CommandLine, inner)
       .await?;
diff --git a/src/beetle-srv/src/bin/beetle-registrar.rs b/src/beetle-srv/src/bin/beetle-registrar.rs
index c4e82eb..8b97040 100644
--- a/src/beetle-srv/src/bin/beetle-registrar.rs
+++ b/src/beetle-srv/src/bin/beetle-registrar.rs
@@ -15,18 +15,21 @@ async fn run(config: beetle::registrar::Configuration) -> Result<()> {
     option_env!("BEETLE_VERSION").unwrap_or_else(|| "dev")
   );

-  let mut worker = config.worker().await?;
   let mut failures = Vec::with_capacity(3);
-  let mut interval = async_std::stream::interval(std::time::Duration::from_millis(1000));
+  let interval_ms = config.registrar.interval_delay_ms.as_ref().copied();
+  let mut interval = interval_ms
+    .map(std::time::Duration::from_millis)
+    .map(async_std::stream::interval);
+
   let mut last_debug = std::time::Instant::now();
   let mut frames = 0u8;
+  let mut worker = config.worker().await?;

   while failures.len() < 10 {
-    interval.next().await;
     log::trace!("attempting worker frame");
     let now = std::time::Instant::now();

-    if now.duration_since(last_debug).as_secs() > 4 || frames == 255 {
+    if now.duration_since(last_debug).as_secs() > 4 || frames == u8::MAX {
       last_debug = now;
       log::info!("registrar still working ({frames} frames since last interval)...");
       frames = 0;
@@ -42,6 +45,11 @@ async fn run(config: beetle::registrar::Configuration) -> Result<()> {
       }
     }
   }
+
+    if let Some(interval) = interval.as_mut() {
+      log::trace!("explicit registrar execution delay - {:?}", interval_ms);
+      interval.next().await;
+    }
   }

   log::warn!("registrar exiting with failures - {failures:?}");
diff --git a/src/beetle-srv/src/bin/cli/messages.rs b/src/beetle-srv/src/bin/cli/messages.rs
index 858ea3b..d7681bd 100644
--- a/src/beetle-srv/src/bin/cli/messages.rs
+++ b/src/beetle-srv/src/bin/cli/messages.rs
@@ -89,7 +89,7 @@ pub async fn
send_layout(config: &super::CommandLineConfig, command: SendLayoutC if let Some(device_id) = &command.id { let mut stream = beetle::redis::connect(&config.redis).await?; - let mut queue = beetle::rendering::Queue::new(&mut stream); + let mut queue = beetle::rendering::Queue::new(&mut stream, &config.registrar.vendor_api_secret); let (request_id, pending) = queue .queue( device_id, @@ -135,7 +135,7 @@ pub async fn send_scannable(config: &super::CommandLineConfig, command: SendScan } let request = beetle::rendering::RenderVariant::scannable(&command.content); - let mut queue = beetle::rendering::Queue::new(&mut stream); + let mut queue = beetle::rendering::Queue::new(&mut stream, &config.registrar.vendor_api_secret); let (request_id, pending) = queue .queue( &command.id, @@ -195,7 +195,7 @@ pub async fn send_image(config: &super::CommandLineConfig, command: SendImageCom if let Some(device_id) = &command.id { let mut stream = beetle::redis::connect(&config.redis).await?; - let mut queue = beetle::rendering::Queue::new(&mut stream); + let mut queue = beetle::rendering::Queue::new(&mut stream, &config.registrar.vendor_api_secret); let (request_id, pending) = queue .queue( device_id, diff --git a/src/beetle-srv/src/bin/cli/migrate/migrations/m0.rs b/src/beetle-srv/src/bin/cli/migrate/migrations/m0.rs new file mode 100644 index 0000000..48c9d0d --- /dev/null +++ b/src/beetle-srv/src/bin/cli/migrate/migrations/m0.rs @@ -0,0 +1,285 @@ +#![allow(clippy::missing_docs_in_private_items)] + +//! Migration note: This migration is responsible for the breaking changes made to the +//! `device_authorities` and `device_states` collections, which were converted to use nested +//! structs as their enum datum (instead of tuple fields). + +use super::super::ops::from_to; +use serde::{Deserialize, Serialize}; + +#[derive(Debug, Clone, Copy, Deserialize, Serialize)] +#[serde(rename_all = "snake_case", tag = "migration:name")] +pub struct Migration {} + +impl Migration { + pub async fn up(&self, config: &crate::cli::CommandLineConfig) -> anyhow::Result<()> { + log::debug!("moving from old to new"); + from_to(config, &config.mongo.collections.device_schedules, up_schedule).await?; + from_to(config, &config.mongo.collections.device_states, up_state).await?; + from_to(config, &config.mongo.collections.device_authorities, up_auth).await + } + + pub async fn down(&self, config: &crate::cli::CommandLineConfig) -> anyhow::Result<()> { + from_to(config, &config.mongo.collections.device_schedules, down_schedule).await?; + from_to(config, &config.mongo.collections.device_states, down_state).await?; + from_to(config, &config.mongo.collections.device_authorities, down_auth).await + } +} + +fn up_auth(record: OriginDeviceAuthorityRecord) -> (TargetDeviceAuthorityRecord, bson::Document) { + let updated = TargetDeviceAuthorityRecord { + device_id: record.device_id.clone(), + authority_model: record.authority_model.map(|model| match model { + OriginDeviceAuthorityModel::Exclusive(owner) => TargetDeviceAuthorityModel::Exclusive { owner }, + OriginDeviceAuthorityModel::Shared(owner, _) => TargetDeviceAuthorityModel::Shared { owner, guests: vec![] }, + OriginDeviceAuthorityModel::Public(owner, _) => TargetDeviceAuthorityModel::Public { owner, guests: vec![] }, + }), + }; + + (updated, bson::doc! 
{ "device_id": record.device_id }) +} + +fn down_auth(record: TargetDeviceAuthorityRecord) -> (OriginDeviceAuthorityRecord, bson::Document) { + let updated = OriginDeviceAuthorityRecord { + device_id: record.device_id.clone(), + authority_model: record.authority_model.map(|model| match model { + TargetDeviceAuthorityModel::Exclusive { owner } => OriginDeviceAuthorityModel::Exclusive(owner), + TargetDeviceAuthorityModel::Shared { owner, .. } => OriginDeviceAuthorityModel::Exclusive(owner), + TargetDeviceAuthorityModel::Public { owner, .. } => OriginDeviceAuthorityModel::Exclusive(owner), + }), + }; + + (updated, bson::doc! { "device_id": record.device_id }) +} + +fn up_schedule(record: OriginDeviceSchedule) -> (TargetDeviceSchedule, bson::Document) { + let updated = TargetDeviceSchedule { + device_id: record.device_id.clone(), + last_executed: record.last_executed, + kind: record.kind.map(|k| match k { + OriginDeviceScheduleKind::UserEventsBasic(id) => TargetDeviceScheduleKind::UserEventsBasic { user_oid: id }, + }), + }; + log::debug!("migrating UP TO schedule '{updated:?}'"); + + (updated, bson::doc! { "device_id": record.device_id }) +} + +fn down_schedule(record: TargetDeviceSchedule) -> (OriginDeviceSchedule, bson::Document) { + let updated = OriginDeviceSchedule { + device_id: record.device_id.clone(), + last_executed: record.last_executed, + kind: record.kind.map(|k| match k { + TargetDeviceScheduleKind::UserEventsBasic { user_oid } => OriginDeviceScheduleKind::UserEventsBasic(user_oid), + }), + }; + log::debug!("migrating DOWN TO schedule '{updated:?}'"); + + (updated, bson::doc! { "device_id": record.device_id }) +} + +fn up_state(record: OriginDeviceState) -> (TargetDeviceState, bson::Document) { + let updated = TargetDeviceState { + updated_at: record.updated_at, + device_id: record.device_id.clone(), + rendering: record.rendering.map(|old| match old { + OriginDeviceRenderingState::MessageList(messages) => { + let messages = messages + .into_iter() + .map(|m| TargetDeviceRenderingStateMessageEntry { + content: m.content, + origin: match m.origin { + OriginDeviceStateMessageOrigin::Unknown => TargetDeviceStateMessageOrigin::Unknown, + OriginDeviceStateMessageOrigin::User(id) => TargetDeviceStateMessageOrigin::User { nickname: id }, + }, + timestamp: m.timestamp, + }) + .collect(); + TargetDeviceRenderingState::MessageList { messages } + } + OriginDeviceRenderingState::ScheduleLayout(events, messages) => { + let messages = messages + .into_iter() + .map(|m| TargetDeviceRenderingStateMessageEntry { + content: m.content, + origin: match m.origin { + OriginDeviceStateMessageOrigin::Unknown => TargetDeviceStateMessageOrigin::Unknown, + OriginDeviceStateMessageOrigin::User(id) => TargetDeviceStateMessageOrigin::User { nickname: id }, + }, + timestamp: m.timestamp, + }) + .collect(); + + TargetDeviceRenderingState::ScheduleLayout { events, messages } + } + }), + }; + + (updated, bson::doc! 
{ "device_id": record.device_id }) +} + +fn down_state(record: TargetDeviceState) -> (OriginDeviceState, bson::Document) { + let updated = OriginDeviceState { + updated_at: record.updated_at, + device_id: record.device_id.clone(), + rendering: record.rendering.map(|old| match old { + TargetDeviceRenderingState::MessageList { messages } => { + let messages = messages + .into_iter() + .map(|m| OriginDeviceRenderingStateMessageEntry { + content: m.content, + origin: match m.origin { + TargetDeviceStateMessageOrigin::Unknown => OriginDeviceStateMessageOrigin::Unknown, + TargetDeviceStateMessageOrigin::User { nickname } => OriginDeviceStateMessageOrigin::User(nickname), + }, + timestamp: m.timestamp, + }) + .collect(); + OriginDeviceRenderingState::MessageList(messages) + } + TargetDeviceRenderingState::ScheduleLayout { events, messages } => { + let messages = messages + .into_iter() + .map(|m| OriginDeviceRenderingStateMessageEntry { + content: m.content, + origin: match m.origin { + TargetDeviceStateMessageOrigin::Unknown => OriginDeviceStateMessageOrigin::Unknown, + TargetDeviceStateMessageOrigin::User { nickname } => OriginDeviceStateMessageOrigin::User(nickname), + }, + timestamp: m.timestamp, + }) + .collect(); + + OriginDeviceRenderingState::ScheduleLayout(events, messages) + } + }), + }; + + (updated, bson::doc! { "device_id": record.device_id }) +} + +#[derive(Debug, Clone, Deserialize, Serialize)] +#[serde(rename_all = "snake_case", tag = "beetle:kind", content = "beetle:content")] +pub enum TargetDeviceStateMessageOrigin { + Unknown, + User { nickname: String }, +} + +#[derive(Debug, Deserialize, Serialize)] +#[serde(rename_all = "snake_case")] +struct TargetDeviceRenderingStateMessageEntry { + content: String, + origin: TargetDeviceStateMessageOrigin, + timestamp: Option>, +} + +#[derive(Debug, Deserialize, Serialize)] +#[serde(rename_all = "snake_case", tag = "beetle:kind", content = "beetle:content")] +enum TargetDeviceRenderingState { + ScheduleLayout { + events: Vec, + messages: Vec, + }, + MessageList { + messages: Vec, + }, +} + +#[derive(Debug, Clone, Deserialize, Serialize)] +#[serde(rename_all = "snake_case", tag = "beetle:kind", content = "beetle:content")] +enum OriginDeviceStateMessageOrigin { + Unknown, + User(String), +} + +#[derive(Debug, Deserialize, Serialize)] +#[serde(rename_all = "snake_case")] +struct OriginDeviceRenderingStateMessageEntry { + content: String, + origin: OriginDeviceStateMessageOrigin, + timestamp: Option>, +} + +#[derive(Debug, Deserialize, Serialize)] +#[serde(rename_all = "snake_case", tag = "beetle:kind", content = "beetle:content")] +enum OriginDeviceRenderingState { + ScheduleLayout( + Vec, + Vec, + ), + MessageList(Vec), +} + +#[derive(Debug, Deserialize, Serialize)] +#[serde(rename_all = "snake_case")] +struct TargetDeviceState { + device_id: String, + updated_at: Option>, + rendering: Option, +} + +#[derive(Debug, Deserialize, Serialize)] +#[serde(rename_all = "snake_case")] +struct OriginDeviceState { + device_id: String, + updated_at: Option>, + rendering: Option, +} + +#[derive(Deserialize, Serialize, Debug)] +#[serde(rename_all = "snake_case", tag = "beetle:kind", content = "beetle:content")] +enum OriginDeviceAuthorityModel { + Exclusive(String), + Shared(String, Vec), + Public(String, Vec), +} + +#[derive(Deserialize, Serialize, Debug, Default)] +#[serde(rename_all = "snake_case")] +struct OriginDeviceAuthorityRecord { + device_id: String, + authority_model: Option, +} + +#[derive(Deserialize, Serialize, Debug)] +#[serde(rename_all 
= "snake_case", tag = "beetle:kind", content = "beetle:content")] +enum TargetDeviceAuthorityModel { + Exclusive { owner: String }, + Shared { owner: String, guests: Vec }, + Public { owner: String, guests: Vec }, +} + +#[derive(Deserialize, Serialize, Debug, Default)] +#[serde(rename_all = "snake_case")] +struct TargetDeviceAuthorityRecord { + device_id: String, + authority_model: Option, +} + +#[derive(Deserialize, Serialize, Debug)] +#[serde(rename_all = "snake_case", tag = "beetle:kind", content = "beetle:content")] +enum OriginDeviceScheduleKind { + UserEventsBasic(String), +} + +#[derive(Deserialize, Serialize, Debug, Default)] +#[serde(rename_all = "snake_case")] +struct OriginDeviceSchedule { + device_id: String, + last_executed: Option, + kind: Option, +} + +#[derive(Deserialize, Serialize, Debug)] +#[serde(rename_all = "snake_case", tag = "beetle:kind", content = "beetle:content")] +enum TargetDeviceScheduleKind { + UserEventsBasic { user_oid: String }, +} + +#[derive(Deserialize, Serialize, Debug, Default)] +#[serde(rename_all = "snake_case")] +struct TargetDeviceSchedule { + device_id: String, + last_executed: Option, + kind: Option, +} diff --git a/src/beetle-srv/src/bin/cli/migrate/migrations/mod.rs b/src/beetle-srv/src/bin/cli/migrate/migrations/mod.rs new file mode 100644 index 0000000..aa43bff --- /dev/null +++ b/src/beetle-srv/src/bin/cli/migrate/migrations/mod.rs @@ -0,0 +1,2 @@ +#![allow(clippy::missing_docs_in_private_items)] +pub mod m0; diff --git a/src/beetle-srv/src/bin/cli/migrate/mod.rs b/src/beetle-srv/src/bin/cli/migrate/mod.rs new file mode 100644 index 0000000..235c775 --- /dev/null +++ b/src/beetle-srv/src/bin/cli/migrate/mod.rs @@ -0,0 +1,122 @@ +#![allow(clippy::missing_docs_in_private_items)] + +//! Note: this current implementation of "migrations" only exists to unblock schema refinement +//! during the initial, MVP phase of this project. There are some benefits to doing it this way +//! where the migration authors have lots of control, but the boilerplate and scalability here is +//! questionable. +//! +//! Effectively, each migration would be implemented as a new variant to our `Migration` enum, +//! where the appropriate "solution" is added to the `up`/`down` method and implemented in the +//! `migrations` module. +//! +//! The migrations define their entire own schema for the before/after and should _not_ use the +//! schema provided by the crate. 
+
+use serde::{Deserialize, Serialize};
+use std::io;
+
+mod migrations;
+mod ops;
+
+#[derive(Debug, Clone, Copy, Deserialize, Serialize)]
+#[serde(rename_all = "snake_case", tag = "migration:name")]
+pub enum Migration {
+  M0,
+}
+
+impl Migration {
+  async fn up(&self, config: &super::CommandLineConfig) -> anyhow::Result<()> {
+    match self {
+      Self::M0 => migrations::m0::Migration {}.up(config).await,
+    }
+  }
+
+  async fn down(&self, config: &super::CommandLineConfig) -> anyhow::Result<()> {
+    match self {
+      Self::M0 => migrations::m0::Migration {}.down(config).await,
+    }
+  }
+}
+
+#[derive(Debug, Clone, Copy, Deserialize, Serialize, PartialEq, clap::ValueEnum)]
+pub enum MigrateOp {
+  Up,
+  UpForce,
+  Down,
+  DownForce,
+}
+
+#[derive(Debug, Deserialize, Serialize)]
+struct MigrationHistory {
+  runs: Vec<Migration>,
+}
+
+pub async fn run(config: &super::CommandLineConfig, dir: MigrateOp) -> io::Result<()> {
+  let mongo = beetle::mongo::connect_mongo(&config.mongo).await?;
+  let collection = mongo
+    .database(&config.mongo.database)
+    .collection::<MigrationHistory>(&config.mongo.collections.migrations);
+
+  let full_list = vec![Migration::M0];
+
+  let first = collection
+    .find_one(bson::doc! { "runs": { "$exists": 1 } }, None)
+    .await
+    .map_err(|error| io::Error::new(io::ErrorKind::Other, error.to_string()))?
+    .unwrap_or(MigrationHistory { runs: vec![] });
+
+  let mut run = std::collections::HashSet::new();
+
+  for already_run in first.runs {
+    let serialized = serde_json::to_string(&already_run)?;
+    run.insert(serialized);
+  }
+
+  let mut new_list = vec![];
+  let mut migration_count = 0;
+
+  for migration in full_list {
+    let serialized = serde_json::to_string(&migration)?;
+    match (run.contains(&serialized), dir) {
+      (_, MigrateOp::UpForce) | (false, MigrateOp::Up) => {
+        log::info!("running UP '{serialized}'");
+        migration_count += 1;
+
+        migration
+          .up(config)
+          .await
+          .map_err(|error| io::Error::new(io::ErrorKind::Other, error.to_string()))?;
+        new_list.push(migration);
+      }
+      (_, MigrateOp::DownForce) | (true, MigrateOp::Down) => {
+        log::info!("running DOWN '{serialized}'");
+        migration_count += 1;
+        migration
+          .down(config)
+          .await
+          .map_err(|error| io::Error::new(io::ErrorKind::Other, error.to_string()))?;
+      }
+      // If we already ran this but we're going up, just make sure to retain it in the updated list
+      // of migrations.
+      (true, MigrateOp::Up) => {
+        new_list.push(migration);
+      }
+      _ => continue,
+    }
+  }
+
+  log::info!("migrations complete, {migration_count} were executed");
+
+  collection
+    .find_one_and_replace(
+      bson::doc! { "runs": { "$exists": 1 } },
{ "runs": { "$exists": 1 } }, + MigrationHistory { runs: new_list }, + mongodb::options::FindOneAndReplaceOptions::builder() + .upsert(true) + .return_document(mongodb::options::ReturnDocument::After) + .build(), + ) + .await + .map_err(|error| io::Error::new(io::ErrorKind::Other, error.to_string()))?; + + Ok(()) +} diff --git a/src/beetle-srv/src/bin/cli/migrate/ops.rs b/src/beetle-srv/src/bin/cli/migrate/ops.rs new file mode 100644 index 0000000..197a4ce --- /dev/null +++ b/src/beetle-srv/src/bin/cli/migrate/ops.rs @@ -0,0 +1,50 @@ +use anyhow::Context; +use async_std::stream::StreamExt; + +pub(super) async fn from_to( + config: &crate::cli::CommandLineConfig, + collection: S, + mapper: F, +) -> anyhow::Result<()> +where + S: AsRef, + F: Fn(O) -> (T, bson::Document), + O: for<'a> serde::Deserialize<'a> + serde::Serialize + std::fmt::Debug, + T: for<'a> serde::Deserialize<'a> + serde::Serialize + std::fmt::Debug, +{ + let mongo = beetle::mongo::connect_mongo(&config.mongo).await?; + let db = mongo.database(&config.mongo.database); + let origin_collection = db.collection::(collection.as_ref()); + let target_collection = db.collection::(collection.as_ref()); + + log::info!("====== RUNNING migration for '{}'", collection.as_ref()); + + let mut cursor = origin_collection + .find(bson::doc! { "device_id": { "$exists": 1 } }, None) + .await?; + + let mut updates = vec![]; + while let Some(n) = cursor.next().await { + log::debug!("attemping to migrate '{n:?}' in collection '{}'", collection.as_ref()); + let record = n.with_context(|| "unable to deserialize into old")?; + updates.push(mapper(record)); + } + + log::info!("applying {} update(s)", updates.len()); + for (update, query) in updates { + log::debug!("applying update '{update:?}'"); + target_collection + .find_one_and_replace( + query, + update, + mongodb::options::FindOneAndReplaceOptions::builder() + .return_document(mongodb::options::ReturnDocument::After) + .build(), + ) + .await?; + } + + log::info!("====== COMPLETE migration for '{}'", collection.as_ref()); + + Ok(()) +} diff --git a/src/beetle-srv/src/bin/cli/mod.rs b/src/beetle-srv/src/bin/cli/mod.rs index 31582ee..208bd62 100644 --- a/src/beetle-srv/src/bin/cli/mod.rs +++ b/src/beetle-srv/src/bin/cli/mod.rs @@ -20,6 +20,8 @@ pub struct RegistrarConfiguration { pub id_consumer_password: Option, /// The list of acl entries that should _not_ be invalidated when cleaning up. pub acl_user_allowlist: Option>, + /// The secret used to encrypt vendor api access tokens. + pub vendor_api_secret: String, } /// The CLI tool's internal configuration schema; this should basically mirror the same structure @@ -38,6 +40,9 @@ pub struct CommandLineConfig { mod disconnects; pub use disconnects::{clean_disconnects, print_connected}; +/// A list of migrations +pub mod migrate; + /// Commands associated with device permissions + authentication. mod acls; pub use acls::{invalidate_acls, provision, ProvisionCommand}; diff --git a/src/beetle-srv/src/config.rs b/src/beetle-srv/src/config.rs index ea1b9cd..0f2adc0 100644 --- a/src/beetle-srv/src/config.rs +++ b/src/beetle-srv/src/config.rs @@ -33,6 +33,9 @@ pub struct GoogleConfiguration { #[derive(Deserialize, Debug, Clone)] #[serde(rename_all = "snake_case")] pub struct MongoCollectionsConfiguration { + /// The collection of migration info. + pub migrations: String, + /// The collection name which holds our list of users (which includes their device access). 
   pub users: String,
@@ -65,6 +68,19 @@ pub struct MongoConfiguration {
   pub collections: MongoCollectionsConfiguration,
 }

+/// Configuration for the registrar's optional analytics reporting.
+#[derive(Deserialize, Debug, Clone)]
+#[serde(rename_all = "snake_case", tag = "kind", content = "content")]
+pub enum RegistrarAnalyticsConfiguration {
+  /// Configuration for newrelic api usage.
+  NewRelic {
+    /// The newrelic account id.
+    account_id: String,
+    /// The newrelic license key.
+    api_key: String,
+  },
+}
+
 /// The configuration specific to maintaining a registration of available ids.
 #[derive(Deserialize, Debug, Clone)]
 #[serde(rename_all = "snake_case")]
@@ -72,6 +88,7 @@ pub struct RegistrarConfiguration {
   // TODO: the cli's registrar configuration uses these fields, and we may as well.
   /// The auth username that will be given on burn-in to devices.
   pub id_consumer_username: Option<String>,
+
   /// The auth password that will be given on burn-in to devices.
   pub id_consumer_password: Option<String>,

@@ -84,6 +101,15 @@ pub struct RegistrarConfiguration {
   /// Where to send devices on their initial connection.
   pub initial_scannable_addr: String,

+  /// The amount of time, in milliseconds, to explicitly wait between executions.
+  pub interval_delay_ms: Option<u64>,
+
   /// The secret used to encrypt vendor api access tokens.
   pub vendor_api_secret: String,
+
+  /// If provided, the amount of time, in seconds, between device schedule refreshes.
+  pub device_schedule_refresh_interval_seconds: Option<i64>,
+
+  /// Optional analytics configuration, used for monitoring queue health.
+  pub analytics_configuration: Option<RegistrarAnalyticsConfiguration>,
 }
diff --git a/src/beetle-srv/src/lib.rs b/src/beetle-srv/src/lib.rs
index ca0ba17..34060b1 100644
--- a/src/beetle-srv/src/lib.rs
+++ b/src/beetle-srv/src/lib.rs
@@ -51,3 +51,6 @@ pub mod vendor;
 /// Generally speaking, this module contains types stored in the mongo instance. It should be
 /// renamed to something better.
 pub mod schema;
+
+/// The reporting module contains metrics gathering + publishing functionality.
+pub(crate) mod reporting;
diff --git a/src/beetle-srv/src/registrar/access.rs b/src/beetle-srv/src/registrar/access.rs
index 0b3819a..e28d353 100644
--- a/src/beetle-srv/src/registrar/access.rs
+++ b/src/beetle-srv/src/registrar/access.rs
@@ -24,7 +24,8 @@ pub async fn user_access(
   // Now we want to find the authority record associated with this device. If there isn't one
   // already, one will be created with a default, exclusive model for the current user.
-  let initial_auth = Some(schema::DeviceAuthorityModel::Exclusive(user_id.clone()));
+  let initial_auth = Some(schema::DeviceAuthorityModel::Exclusive { owner: user_id.clone() });
+
   let serialized_auth = bson::to_bson(&initial_auth).map_err(|error| {
     log::warn!("unable to prepare initial auth - {error}");
     io::Error::new(io::ErrorKind::Other, "authority-serialization")
@@ -52,7 +53,7 @@ pub async fn user_access(
   log::trace!("current authority record - {authority_record:?}");

   match authority_record.as_ref().and_then(|rec| rec.authority_model.as_ref()) {
-    Some(schema::DeviceAuthorityModel::Shared(owner, guests)) => {
+    Some(schema::DeviceAuthorityModel::Shared { owner, guests }) => {
       let mut found = false;
       for guest in guests {
         if guest == user_id {
@@ -65,12 +66,12 @@ pub async fn user_access(
         return Ok(None);
       }
     }
-    Some(schema::DeviceAuthorityModel::Exclusive(owner)) => {
+    Some(schema::DeviceAuthorityModel::Exclusive { owner }) => {
       if owner != user_id {
         return Ok(None);
       }
     }
-    Some(schema::DeviceAuthorityModel::Public(_, _)) => return Ok(Some((AccessLevel::All, authority_record))),
+    Some(schema::DeviceAuthorityModel::Public { .. }) => return Ok(Some((AccessLevel::All, authority_record))),
     None => {
       log::warn!("no authority record found for '{device_id}'!");
     }
diff --git a/src/beetle-srv/src/registrar/device_schedule.rs b/src/beetle-srv/src/registrar/device_schedule.rs
index e2286ae..597828a 100644
--- a/src/beetle-srv/src/registrar/device_schedule.rs
+++ b/src/beetle-srv/src/registrar/device_schedule.rs
@@ -53,13 +53,13 @@ where
   schedule.kind = match (should_enable, schedule.kind.take()) {
     (true, Some(kind)) => Some(kind),
-    (true, None) => Some(schema::DeviceScheduleKind::UserEventsBasic(
-      user_id.as_ref().to_string(),
-    )),
+    (true, None) => Some(schema::DeviceScheduleKind::UserEventsBasic {
+      user_oid: user_id.as_ref().to_string(),
+    }),
     (false, _) => None,
   };

-  log::info!("applying new schedule - '{schedule:?}'");
+  log::trace!("applying new schedule - '{schedule:?}'");

   let result = collection
     .find_one_and_replace(
@@ -78,21 +78,27 @@ where
   })?;

   worker
-    .enqueue_kind(super::RegistrarJobKind::RunDeviceSchedule(
-      device_id.as_ref().to_string(),
-    ))
+    .enqueue_kind(super::RegistrarJobKind::RunDeviceSchedule {
+      device_id: device_id.as_ref().to_string(),
+      refresh_nonce: None,
+    })
     .await?;

-  log::info!("schedule update result - '{result:?}'");
+  log::trace!("schedule update result - '{result:?}'");

   Ok(())
 }

 /// This method is responsible for immediately running any schedule associated with the device id
 /// provided in the job.
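
The nonce handshake that the next few hunks introduce between `check_schedules` (which stamps a fresh `refresh_nonce` when it enqueues a run) and `execute` (which bails out when the job's nonce no longer matches the stored one, and records `latest_refresh_nonce` on completion) is easiest to see in isolation. A minimal model of the two checks, using illustrative standalone types rather than the crate's schema:

    /// Illustrative stand-in for the two nonce fields carried on a device schedule.
    struct ScheduleNonces {
      refresh_nonce: Option<String>,        // written by the scheduler when enqueueing
      latest_refresh_nonce: Option<String>, // written by the executor on completion
    }

    /// The scheduler only enqueues another run once the executor has caught up,
    /// i.e. both nonces agree (or neither has ever been written).
    fn ready_for_refresh(s: &ScheduleNonces) -> bool {
      match (&s.refresh_nonce, &s.latest_refresh_nonce) {
        (Some(current), Some(latest)) => current == latest,
        (None, None) => true,
        _ => false,
      }
    }

    /// The executor drops any job whose nonce no longer matches the stored value.
    fn should_execute(job_nonce: Option<&str>, stored: Option<&str>) -> bool {
      !matches!((job_nonce, stored), (Some(j), Some(s)) if j != s)
    }
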
-pub(super) async fn execute<S>(mut worker: super::worker::WorkerHandle<'_>, device_id: S) -> anyhow::Result<()>
+pub(super) async fn execute<S, N>(
+  mut worker: super::worker::WorkerHandle<'_>,
+  device_id: S,
+  nonce: Option<N>,
+) -> anyhow::Result<Option<()>>
 where
   S: AsRef<str>,
+  N: AsRef<str>,
 {
   let db = worker.mongo.client.database(&worker.mongo.config.database);
   let schedules_collection = worker.device_schedule_collection()?;
@@ -113,12 +119,19 @@ where
     )
   })?;

+  match (nonce, schedule.refresh_nonce.as_ref()) {
+    (Some(job_nonce), Some(stored_nonce)) if job_nonce.as_ref() != stored_nonce.as_str() => {
+      return Ok(None);
+    }
+    _ => log::trace!("valid refresh being executed"),
+  }
+
   match schedule.kind {
     None => {
       log::info!("nothing to do for device '{}' schedule", device_id.as_ref());
     }
-    Some(schema::DeviceScheduleKind::UserEventsBasic(user_id)) => {
-      log::info!(
+    Some(schema::DeviceScheduleKind::UserEventsBasic { user_oid: user_id }) => {
+      log::trace!(
         "querying events for device '{}' and user '{}'",
         device_id.as_ref(),
         user_id
@@ -153,7 +166,7 @@ where
       )?;
       partial_user.latest_token.token.access_token = decoded_token.claims.token;

-      log::info!(
+      log::trace!(
         "querying calendars for token - '{:?}'",
         partial_user.latest_token.created
       );
@@ -163,7 +176,7 @@ where
         .ok_or_else(|| {
           io::Error::new(
             io::ErrorKind::Other,
-            format!("no priamry calendar found for user '{user_id}'"),
+            format!("no primary calendar found for user '{user_id}'"),
           )
         })?;
@@ -173,7 +186,7 @@ where
         .filter_map(|raw_event| google::parse_event(&raw_event).ok())
         .collect();

-      log::info!(
+      log::trace!(
         "found {} events for user '{user_id}' ({:?})",
         events.len(),
         partial_user.name
@@ -191,14 +204,20 @@ where
   }

   let now = chrono::Utc::now().timestamp_millis();
-  log::debug!("setting last executed timestamp for '{}' to {now}", device_id.as_ref());
+
+  log::debug!(
+    "setting last executed timestamp for '{}' to {now} (nonce {:?})",
+    device_id.as_ref(),
+    schedule.refresh_nonce
+  );
+
   schedules_collection
     .find_one_and_update(
       bson::doc! { "device_id": device_id.as_ref() },
-      bson::doc! { "$set": { "last_executed": now } },
+      bson::doc! { "$set": { "last_executed": now, "latest_refresh_nonce": schedule.refresh_nonce } },
       None,
     )
     .await
-    .map(|_| ())
+    .map(|_| Some(()))
    .with_context(|| format!("unable to update device schedule for '{}'", device_id.as_ref()))
 }
diff --git a/src/beetle-srv/src/registrar/device_state.rs b/src/beetle-srv/src/registrar/device_state.rs
index f858ddd..5429ab1 100644
--- a/src/beetle-srv/src/registrar/device_state.rs
+++ b/src/beetle-srv/src/registrar/device_state.rs
@@ -80,13 +80,15 @@ fn render_message_entry(
   entry: &schema::DeviceRenderingStateMessageEntry,
   acc: &mut Vec<rendering::components::StylizedMessage>,
   layout: MessageEntryLayout,
-) -> () {
+) {
   let is_first = acc.is_empty();

   // Render the main content.
-  let mut message_component = rendering::components::StylizedMessage::default();
-  message_component.message = entry.content.clone();
-  message_component.size = Some(PRIMARY_TEXT_SIZE);
+  let mut message_component = rendering::components::StylizedMessage {
+    message: entry.content.clone(),
+    size: Some(PRIMARY_TEXT_SIZE),
+    ..Default::default()
+  };
   apply_padding(&mut message_component);
   if is_first {
     message_component.margin = Some(rendering::OptionalBoundingBox {
@@ -96,8 +98,10 @@ fn render_message_entry(
   }
   acc.push(message_component);

-  let mut origin_component = rendering::components::StylizedMessage::default();
-  origin_component.size = Some(SECONDARY_TEXT_SIZE);
+  let mut origin_component = rendering::components::StylizedMessage {
+    size: Some(SECONDARY_TEXT_SIZE),
+    ..Default::default()
+  };
   apply_padding(&mut origin_component);
   origin_component.margin = Some(rendering::OptionalBoundingBox {
     bottom: Some(10),
@@ -109,7 +113,7 @@ fn render_message_entry(
     MessageEntryLayout::Separate => {
       origin_component.message = match &entry.origin {
         schema::DeviceStateMessageOrigin::Unknown => "unknown".to_string(),
-        schema::DeviceStateMessageOrigin::User(value) => value.clone(),
+        schema::DeviceStateMessageOrigin::User { nickname: value } => value.clone(),
       };

       if let Some(ts) = entry.timestamp {
@@ -127,7 +131,7 @@ fn render_message_entry(
       apply_padding(&mut time_component);
       acc.push(origin_component);
       acc.push(time_component);
-      return ();
+      return;
     }

     acc.push(origin_component);
@@ -135,7 +139,7 @@ fn render_message_entry(
     MessageEntryLayout::Together => {
       let from_addr = match &entry.origin {
         schema::DeviceStateMessageOrigin::Unknown => "unknown".to_string(),
-        schema::DeviceStateMessageOrigin::User(value) => value.clone(),
+        schema::DeviceStateMessageOrigin::User { nickname: value } => value.clone(),
       };
       origin_component.message = entry
         .timestamp
@@ -146,19 +150,43 @@ fn render_message_entry(
   }
 }

+/// Returns the "marker" for an event, where the marker is the event's stringified starting date.
+/// Note that this also doubles as the _key_ of our `BTreeMap` when iterating over events, which is
+/// how the events are ultimately ordered.
+fn event_marker(event: &google::ParsedEvent) -> Option<String> {
+  match event.start {
+    google::ParsedEventTimeMarker::DateTime(datetime) => Some(datetime.format("%B %d").to_string()),
+    google::ParsedEventTimeMarker::Date(y, m, d) => {
+      chrono::NaiveDate::from_ymd_opt(y as i32, m, d).map(|d| d.format("%B %d").to_string())
+    }
+  }
+}
+
 /// This method will actually build the render layout based on the current device rendering state.
 /// It is possible that this would be better implemented as an associated method on the
 /// `DeviceRenderingState` type itself, but the goal is to avoid _any_ methods directly built in
 /// the `schema` module (though it is tempting).
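
One property of `event_marker` worth calling out: since the marker string doubles as the `BTreeMap` key, the day groups end up ordered by the "%B %d" rendering, which sorts alphabetically by month name rather than chronologically. A minimal sketch of the grouping strategy used by `render_state` below, with a simplified `Event` standing in for `google::ParsedEvent`:

    use std::collections::BTreeMap;

    /// Simplified stand-in for the parsed calendar event type.
    struct Event {
      start: chrono::NaiveDate,
      summary: String,
    }

    /// Buckets event summaries under their formatted start date; the BTreeMap
    /// keeps one ordered group per day marker.
    fn group_by_day(events: &[Event]) -> BTreeMap<String, Vec<String>> {
      let mut by_day: BTreeMap<String, Vec<String>> = BTreeMap::new();
      for event in events {
        let marker = event.start.format("%B %d").to_string();
        by_day.entry(marker).or_default().push(event.summary.clone());
      }
      by_day
    }
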
fn render_state(state: &schema::DeviceRenderingState) -> anyhow::Result> { match state { - schema::DeviceRenderingState::ScheduleLayout(events, message_list) => { - let mut left = vec![]; + schema::DeviceRenderingState::ScheduleLayout { + events, + messages: message_list, + } => { + let mut events_by_date = std::collections::BTreeMap::new(); for event in events.iter().take(MAX_DISPLAYED_EVENTS) { - log::info!("rendering event '{event:?}'"); + log::trace!("rendering event '{event:?}'"); + let marker = match event_marker(event) { + Some(m) => m, + None => { + log::error!("event '{event:?}' is missing date"); + continue; + } + }; + let mut messages: Vec> = + events_by_date.remove(&marker).unwrap_or_default(); - left.push(rendering::components::StylizedMessage { + messages.push(rendering::components::StylizedMessage { message: event.summary.clone(), size: Some(PRIMARY_TEXT_SIZE), @@ -184,7 +212,7 @@ fn render_state(state: &schema::DeviceRenderingState) -> anyhow::Result anyhow::Result 0).then_some(10), + left: Some(5), + ..Default::default() + }), + ..Default::default() + }; + messages.insert(0, title_message); + + left.append(&mut messages); + } + let left = rendering::SplitContents::Messages(left); let right = rendering::SplitContents::Messages(messages); - let split = rendering::SplitLayout { left, right, ratio: 50 }; + let split = rendering::SplitLayout { left, right, ratio }; Ok(rendering::RenderLayout::Split(split)) } - schema::DeviceRenderingState::MessageList(list) => { + schema::DeviceRenderingState::MessageList { messages: list } => { let messages = list.iter().fold(Vec::with_capacity(list.len() * 2), |mut acc, entry| { - render_message_entry(&entry, &mut acc, MessageEntryLayout::Together); + render_message_entry(entry, &mut acc, MessageEntryLayout::Together); acc }); let left = rendering::SplitContents::Messages(messages); @@ -249,7 +298,7 @@ pub(super) async fn render_current( .with_context(|| format!("unable to load current device state for '{device_id}'"))? .ok_or_else(|| anyhow::Error::msg(format!("no device state found for '{device_id}'")))?; - log::info!("current render state for '{device_id}': {current_state:?}"); + log::info!("rendering current state for '{device_id}'"); let layout = current_state .rendering @@ -264,12 +313,8 @@ pub(super) async fn render_current( }) .unwrap_or(rendering::RenderLayout::Clear); - log::info!("device '{device_id}' attempting to render '{layout:?}'"); - let render_id = handle.render(device_id, layout).await?; - log::info!("render '{render_id}' scheduled for device '{device_id}'"); - Ok(()) } @@ -278,9 +323,9 @@ pub(super) async fn attempt_transition( mut handle: super::worker::WorkerHandle<'_>, transition_request: &DeviceStateTransitionRequest, ) -> anyhow::Result<()> { - log::info!("attempting to transition {transition_request:?}"); let states = handle.device_state_collection()?; let device_id = transition_request.device_id.clone(); + log::trace!("attempting to perform a state transition for device '{device_id}'"); let current_state = states .find_one_and_update( @@ -310,23 +355,23 @@ pub(super) async fn attempt_transition( })? .ok_or_else(|| anyhow::Error::msg(format!("unable to find device '{}'", &device_id)))?; - log::debug!("loaded current state for transition - {current_state:?}"); + log::trace!("loaded current state for transition - {current_state:?}"); let next_state = match (current_state.rendering, &transition_request.transition) { // push a message onto nothing. 
- (None, DeviceStateTransition::PushMessage(content, origin)) => { - Some(schema::DeviceRenderingState::MessageList(vec![ - schema::DeviceRenderingStateMessageEntry { - content: content.clone(), - origin: origin.clone(), - timestamp: Some(chrono::Utc::now()), - }, - ])) - } + (None, DeviceStateTransition::PushMessage(content, origin)) => Some(schema::DeviceRenderingState::MessageList { + messages: vec![schema::DeviceRenderingStateMessageEntry { + content: content.clone(), + origin: origin.clone(), + timestamp: Some(chrono::Utc::now()), + }], + }), // push a message onto a message list. ( - Some(schema::DeviceRenderingState::MessageList(mut current_list)), + Some(schema::DeviceRenderingState::MessageList { + messages: mut current_list, + }), DeviceStateTransition::PushMessage(content, origin), ) => { while current_list.len() > MAX_MESSAGE_LIST_LEN { @@ -337,12 +382,15 @@ pub(super) async fn attempt_transition( origin: origin.clone(), timestamp: Some(chrono::Utc::now()), }); - Some(schema::DeviceRenderingState::MessageList(current_list)) + Some(schema::DeviceRenderingState::MessageList { messages: current_list }) } // push a message onto a schedule. ( - Some(schema::DeviceRenderingState::ScheduleLayout(events, mut current_list)), + Some(schema::DeviceRenderingState::ScheduleLayout { + events, + messages: mut current_list, + }), DeviceStateTransition::PushMessage(content, origin), ) => { while current_list.len() > MAX_MESSAGE_LIST_LEN { @@ -354,7 +402,10 @@ pub(super) async fn attempt_transition( timestamp: Some(chrono::Utc::now()), }); - Some(schema::DeviceRenderingState::ScheduleLayout(events, current_list)) + Some(schema::DeviceRenderingState::ScheduleLayout { + events, + messages: current_list, + }) } (_, DeviceStateTransition::Clear) => { @@ -363,14 +414,19 @@ pub(super) async fn attempt_transition( } // set schedule onto an existing schedule. - (Some(schema::DeviceRenderingState::ScheduleLayout(_, messages)), DeviceStateTransition::SetSchedule(events)) => { - Some(schema::DeviceRenderingState::ScheduleLayout(events.clone(), messages)) - } + ( + Some(schema::DeviceRenderingState::ScheduleLayout { messages, .. }), + DeviceStateTransition::SetSchedule(events), + ) => Some(schema::DeviceRenderingState::ScheduleLayout { + events: events.clone(), + messages, + }), // set schedule onto anything (loss of messages). 
-    (_, DeviceStateTransition::SetSchedule(events)) => {
-      Some(schema::DeviceRenderingState::ScheduleLayout(events.clone(), vec![]))
-    }
+    (_, DeviceStateTransition::SetSchedule(events)) => Some(schema::DeviceRenderingState::ScheduleLayout {
+      events: events.clone(),
+      messages: vec![],
+    }),
   };

   let update = bson::to_document(&PartialStateUpdate {
@@ -389,7 +445,7 @@ pub(super) async fn attempt_transition(
     )
     .await?;

-  log::debug!("final state - {updated_state:?}");
+  log::trace!("final state - {updated_state:?}");

   let percolated_render_id = handle
     .enqueue_kind(super::jobs::RegistrarJobKind::Renders(
diff --git a/src/beetle-srv/src/registrar/diagnostics.rs b/src/beetle-srv/src/registrar/diagnostics.rs
index 0f62d1c..4d36d1d 100644
--- a/src/beetle-srv/src/registrar/diagnostics.rs
+++ b/src/beetle-srv/src/registrar/diagnostics.rs
@@ -103,7 +103,7 @@ where
     device_diagnostic.id
   );

-  let mut queue = crate::rendering::queue::Queue::new(stream);
+  let mut queue = crate::rendering::queue::Queue::new(stream, &worker.config.vendor_api_secret);
   let mut initial_url = http_types::Url::parse(&worker.config.initial_scannable_addr).map_err(|error| {
     log::warn!("unable to create initial url for device - {error}");
     io::Error::new(io::ErrorKind::Other, format!("{error}"))
diff --git a/src/beetle-srv/src/registrar/jobs.rs b/src/beetle-srv/src/registrar/jobs.rs
index be7bf31..f784b3d 100644
--- a/src/beetle-srv/src/registrar/jobs.rs
+++ b/src/beetle-srv/src/registrar/jobs.rs
@@ -34,7 +34,13 @@ pub enum RegistrarJobKind {
   MutateDeviceState(device_state::DeviceStateTransitionRequest),

   /// An immediate attempt to run the schedule for a device.
-  RunDeviceSchedule(String),
+  RunDeviceSchedule {
+    /// The id of the device to refresh based on its schedule.
+    device_id: String,
+
+    /// The nonce associated with this attempt to run the schedule.
+    refresh_nonce: Option<String>,
+  },

   /// A job that will simply turn on or off the default schedule for a device, given a user whose
   /// calendar would be used.
@@ -86,8 +92,12 @@ pub struct RegistrarJob {
 impl RegistrarJob {
   /// Serializes and encrypts a job.
   pub fn encrypt(self, config: &crate::config::RegistrarConfiguration) -> io::Result<String> {
+    // TODO(job_encryption): using jwt here for ease of use, not because it is the best fit. The
+    // original intent in doing this was to avoid having plaintext in our redis messages.
+    // Leveraging an existing dependency like `aes-gcm` would be awesome.
     let header = &jsonwebtoken::Header::default();
     let secret = jsonwebtoken::EncodingKey::from_secret(config.vendor_api_secret.as_bytes());
+
     let exp = chrono::Utc::now()
       .checked_add_signed(chrono::Duration::minutes(1440))
       .unwrap_or_else(chrono::Utc::now)
@@ -96,70 +106,4 @@ impl RegistrarJob {
     jsonwebtoken::encode(header, &RegistrarJobEncrypted { exp, job: self }, &secret)
       .map_err(|error| io::Error::new(io::ErrorKind::Other, format!("unable to encrypt job - {error}")))
   }
-
-  /// Type constructor. Will attempt to store an access token for a user.
-  pub fn access_token_refresh(handle: crate::vendor::google::TokenHandle, user_id: String) -> Self {
-    let id = uuid::Uuid::new_v4().to_string();
-    Self {
-      id,
-      job: RegistrarJobKind::UserAccessTokenRefresh { handle, user_id },
-    }
-  }
-
-  /// Type constructor. Builds a request for toggling ownership record model type.
- pub fn set_public_availability(device_id: S, transition: ownership::PublicAvailabilityChange) -> Self - where - S: std::convert::AsRef, - { - let id = uuid::Uuid::new_v4().to_string(); - let device_id = device_id.as_ref().to_string(); - - Self { - id, - job: RegistrarJobKind::OwnershipChange(ownership::DeviceOwnershipChangeRequest::SetPublicAvailability( - device_id, transition, - )), - } - } - - /// Type constructor. Builds a request for taking device ownership. - pub fn registration_scannable(device_id: S) -> Self - where - S: std::convert::AsRef, - { - let id = uuid::Uuid::new_v4().to_string(); - let device_id = device_id.as_ref().to_string(); - Self { - id, - job: RegistrarJobKind::Renders(RegistrarRenderKinds::RegistrationScannable(device_id)), - } - } - - /// Type constructor. Builds a request for taking device ownership. - pub fn rename_device(device_id: S, new_name: S) -> Self - where - S: std::convert::AsRef, - { - let id = uuid::Uuid::new_v4().to_string(); - let new_name = new_name.as_ref().to_string(); - let device_id = device_id.as_ref().to_string(); - Self { - id, - job: RegistrarJobKind::Rename(DeviceRenameRequest { device_id, new_name }), - } - } - - /// Type constructor. Builds a request for taking device ownership. - pub fn device_ownership(user_id: S, device_id: S) -> Self - where - S: std::convert::AsRef, - { - let id = uuid::Uuid::new_v4().to_string(); - let user_id = user_id.as_ref().to_string(); - let device_id = device_id.as_ref().to_string(); - Self { - id, - job: RegistrarJobKind::Ownership(ownership::DeviceOwnershipRequest { user_id, device_id }), - } - } } diff --git a/src/beetle-srv/src/registrar/mod.rs b/src/beetle-srv/src/registrar/mod.rs index a5f9ec1..1ad48d4 100644 --- a/src/beetle-srv/src/registrar/mod.rs +++ b/src/beetle-srv/src/registrar/mod.rs @@ -32,6 +32,7 @@ mod diagnostics; /// Defines the rename device job. mod rename; +pub(crate) use rename::DeviceRenameRequest; /// Defines the various jobs that will mutate device state. pub(crate) mod device_state; @@ -68,7 +69,19 @@ impl Configuration { pub async fn worker(self) -> io::Result { let mongo = worker::WorkerMongo::new(&self.mongo.url, self.mongo.clone()).await?; + let (reporting, sink) = self + .registrar + .analytics_configuration + .clone() + .map(crate::reporting::Worker::new) + .unzip(); + + if let Some(reporter) = reporting { + async_std::task::spawn(reporter.work()); + } + Ok(Worker { + reporting: sink, config: self.registrar, redis: self.redis, google: self.google, diff --git a/src/beetle-srv/src/registrar/ownership.rs b/src/beetle-srv/src/registrar/ownership.rs index 9542dd2..f7b09ec 100644 --- a/src/beetle-srv/src/registrar/ownership.rs +++ b/src/beetle-srv/src/registrar/ownership.rs @@ -25,10 +25,10 @@ pub enum DeviceOwnershipChangeRequest { #[derive(Debug, Clone, Deserialize, Serialize)] pub struct DeviceOwnershipRequest { /// The id of the device in question. - pub(super) device_id: String, + pub device_id: String, /// The id of the user in question. - pub(super) user_id: String, + pub user_id: String, } /// This is the real worker for this "ownership" kind of job. 
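
The availability toggle handled in the next hunk forms a small state machine: `Exclusive` becomes `Public` on `ToPublic`, `Public` drops back to `Shared` (keeping its guests) and `Shared` collapses to `Exclusive` on `ToPrivate`, with every other combination a no-op. Modeled in isolation, with simplified stand-ins for the crate's schema types:

    #[derive(Clone, Debug, PartialEq)]
    enum Authority {
      Exclusive { owner: String },
      Shared { owner: String, guests: Vec<String> },
      Public { owner: String, guests: Vec<String> },
    }

    enum Toggle {
      ToPublic,
      ToPrivate,
    }

    /// Mirrors the match in `process_change`; `None` marks the no-op combinations.
    fn next_authority(current: &Authority, toggle: &Toggle) -> Option<Authority> {
      match (current, toggle) {
        (Authority::Exclusive { owner }, Toggle::ToPublic) => Some(Authority::Public {
          owner: owner.clone(),
          guests: vec![],
        }),
        (Authority::Public { owner, guests }, Toggle::ToPrivate) => Some(Authority::Shared {
          owner: owner.clone(),
          guests: guests.clone(),
        }),
        (Authority::Shared { owner, .. }, Toggle::ToPrivate) => Some(Authority::Exclusive {
          owner: owner.clone(),
        }),
        _ => None,
      }
    }
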
@@ -54,17 +54,23 @@ pub(super) async fn process_change(worker: &mut super::Worker, job: &DeviceOwner // Attempt to determine the next state based on the requested let new_state = match (&model.authority_model, state) { - (Some(schema::DeviceAuthorityModel::Exclusive(owner)), PublicAvailabilityChange::ToPublic) => { + (Some(schema::DeviceAuthorityModel::Exclusive { owner }), PublicAvailabilityChange::ToPublic) => { log::info!("moving from private, exclusive to public"); - schema::DeviceAuthorityModel::Public(owner.clone(), vec![]) + schema::DeviceAuthorityModel::Public { + owner: owner.clone(), + guests: vec![], + } } - (Some(schema::DeviceAuthorityModel::Public(owner, members)), PublicAvailabilityChange::ToPrivate) => { + (Some(schema::DeviceAuthorityModel::Public { owner, guests }), PublicAvailabilityChange::ToPrivate) => { log::info!("moving from public to shared"); - schema::DeviceAuthorityModel::Shared(owner.clone(), members.clone()) + schema::DeviceAuthorityModel::Shared { + owner: owner.clone(), + guests: guests.clone(), + } } - (Some(schema::DeviceAuthorityModel::Shared(owner, _)), PublicAvailabilityChange::ToPrivate) => { + (Some(schema::DeviceAuthorityModel::Shared { owner, .. }), PublicAvailabilityChange::ToPrivate) => { log::info!("moving from shared to exclusive"); - schema::DeviceAuthorityModel::Exclusive(owner.clone()) + schema::DeviceAuthorityModel::Exclusive { owner: owner.clone() } } other => { log::warn!("toggling public availability means nothing in combination with '{other:?}'"); @@ -77,10 +83,7 @@ pub(super) async fn process_change(worker: &mut super::Worker, job: &DeviceOwner device_id: id.clone(), authority_model: Some(new_state), }; - // let updates = bson::to_document(&new_model).map_err(|error| { - // log::warn!("unable to serialize - {error}"); - // io::Error::new(io::ErrorKind::Other, "serialization failure (auth model)") - // })?; + let result = models .find_one_and_replace( bson::doc! 
{ "device_id": &id }, @@ -93,6 +96,7 @@ pub(super) async fn process_change(worker: &mut super::Worker, job: &DeviceOwner ) .await .map_err(|error| io::Error::new(io::ErrorKind::Other, format!("unable to update model - {error}")))?; + log::info!("matched update count - '{:?}'", result); } } @@ -182,14 +186,22 @@ pub(super) async fn register_device(worker: &mut super::Worker, job: &DeviceOwne if let Some(schema::DeviceAuthorityRecord { device_id, - authority_model: Some(schema::DeviceAuthorityModel::Public(original, mut public_users)), + authority_model: + Some(schema::DeviceAuthorityModel::Public { + owner: original, + guests: mut public_users, + }), }) = authority_model { log::info!("adding user to the public authority model tracking for device '{device_id}'"); public_users.push(job.user_id.clone()); + let new_model = schema::DeviceAuthorityRecord { device_id: device_id.clone(), - authority_model: Some(schema::DeviceAuthorityModel::Public(original, public_users)), + authority_model: Some(schema::DeviceAuthorityModel::Public { + owner: original, + guests: vec![], + }), }; let models = mongo diff --git a/src/beetle-srv/src/registrar/pool.rs b/src/beetle-srv/src/registrar/pool.rs index a025aee..63a833d 100644 --- a/src/beetle-srv/src/registrar/pool.rs +++ b/src/beetle-srv/src/registrar/pool.rs @@ -16,7 +16,7 @@ pub(super) async fn fill_pool(mut stream: &mut crate::redis::RedisConnection, mi true } kramer::Response::Item(kramer::ResponseValue::Integer(amount)) => { - log::info!("nothing to do, plenty of ids ('{amount}' vs min of '{min}')"); + log::trace!("nothing to do, plenty of ids ('{amount}' vs min of '{min}')"); false } other => { diff --git a/src/beetle-srv/src/registrar/schedule.rs b/src/beetle-srv/src/registrar/schedule.rs index b13e5da..df0784b 100644 --- a/src/beetle-srv/src/registrar/schedule.rs +++ b/src/beetle-srv/src/registrar/schedule.rs @@ -16,8 +16,9 @@ const SCHEDULE_REFRESH_SECONDS: i64 = 60 * 5; struct UserTokenInfo { #[allow(clippy::missing_docs_in_private_items)] oid: String, - #[allow(clippy::missing_docs_in_private_items)] - latest_token: crate::vendor::google::TokenHandle, + + /// The latest refresh + access tokens embedded in the user document. + latest_token: Option, } /// Performs a token refresh. 
@@ -98,20 +99,19 @@ async fn check_tokens(worker: &mut super::worker::WorkerHandle<'_>) -> io::Resul
       Ok(c) => c,
     };

+    let token_ref = match current_handle.latest_token.as_mut() {
+      None => {
+        log::warn!("user '{}' is missing a 'latest_token'", current_handle.oid);
+        continue;
+      }
+      Some(token) => token,
+    };
+
     let now = chrono::Utc::now();
-    let diff = now
-      .signed_duration_since(current_handle.latest_token.created)
-      .num_seconds()
-      .abs_diff(0);
-
-    let expiration_diff = current_handle
-      .latest_token
-      .token
-      .expires_in
-      .checked_sub(diff)
-      .unwrap_or_default();
-
-    log::debug!(
+    let diff = now.signed_duration_since(token_ref.created).num_seconds().abs_diff(0);
+    let expiration_diff = token_ref.token.expires_in.checked_sub(diff).unwrap_or_default();
+
+    log::trace!(
       "next user access token - '{}' (created {diff} seconds ago) (expires in {expiration_diff} seconds)",
       current_handle.oid
     );
@@ -121,11 +121,7 @@ async fn check_tokens(worker: &mut super::worker::WorkerHandle<'_>) -> io::Resul
     let validation = jsonwebtoken::Validation::new(jsonwebtoken::Algorithm::HS256);
     let mut replaced_tokens = false;

-    match jsonwebtoken::decode::(
-      &current_handle.latest_token.token.access_token,
-      &key,
-      &validation,
-    ) {
+    match jsonwebtoken::decode::(&token_ref.token.access_token, &key, &validation) {
       Err(error) => {
         log::warn!("unable to decode access token - {error}, scheduling cleanup");
         expired_user_ids.push(current_handle.oid);
@@ -133,25 +129,19 @@ async fn check_tokens(worker: &mut super::worker::WorkerHandle<'_>) -> io::Resul
       }
       Ok(current_token) => {
         log::trace!("decoded original access token - '{}'", current_token.claims.token);
-        current_handle.latest_token.token.access_token = current_token.claims.token;
+        token_ref.token.access_token = current_token.claims.token;
       }
     }

-    if let Some(refresh) = &current_handle
-      .latest_token
-      .token
-      .refresh_token
-      .as_ref()
-      .and_then(|refresh| {
-        jsonwebtoken::decode::(refresh.as_str(), &key, &validation)
-          .map_err(|error| {
-            log::warn!("unable to decode persisted access token - {error}");
-          })
-          .ok()
-      })
-    {
+    if let Some(refresh) = token_ref.token.refresh_token.as_ref().and_then(|refresh| {
+      jsonwebtoken::decode::(refresh.as_str(), &key, &validation)
+        .map_err(|error| {
+          log::warn!("unable to decode persisted access token - {error}");
+        })
+        .ok()
+    }) {
       log::trace!("decoded refresh token - '{:?}'", refresh.claims);
-      current_handle.latest_token.token.refresh_token = Some(refresh.claims.token.clone());
+      token_ref.token.refresh_token = Some(refresh.claims.token.clone());
       replaced_tokens = true;
     }

@@ -166,21 +156,24 @@ async fn check_tokens(worker: &mut super::worker::WorkerHandle<'_>) -> io::Resul
     // Now we can actually attempt to make our api request for a new token. If it succeeds, we
     // will enqueue a job to persist it onto the user document, which will take care of
     // performing the encryption for us.
-    match refresh_token(worker.google, &current_handle.latest_token).await {
+    match refresh_token(worker.google, token_ref).await {
       Err(error) => {
         log::warn!("unable to refresh token for user '{}' ({error})", current_handle.oid);
       }
       Ok(mut updated_token) => {
-        log::info!(
+        log::trace!(
          "successfully updated token, queuing job to persist '{:?}'",
          updated_token.created
        );

        // Be sure to persist the refresh token itself across updates.
- updated_token.token.refresh_token = current_handle.latest_token.token.refresh_token; - let job = super::RegistrarJob::access_token_refresh(updated_token, current_handle.oid); + updated_token.token.refresh_token = token_ref.token.refresh_token.clone(); + let job = super::RegistrarJobKind::UserAccessTokenRefresh { + handle: updated_token, + user_id: current_handle.oid, + }; - if let Err(error) = worker.enqueue(job).await { + if let Err(error) = worker.enqueue_kind(job).await { log::warn!("failed access token refresh percolation - {error}"); } } @@ -212,10 +205,18 @@ async fn check_tokens(worker: &mut super::worker::WorkerHandle<'_>) -> io::Resul /// that have not been run in some time. For these, the worker will queue an execution job and move /// onto the next one. async fn check_schedules(worker: &mut super::worker::WorkerHandle<'_>) -> anyhow::Result<()> { - log::info!("registrar now checking for any schedules due for a refresh"); + log::trace!("registrar now checking for any schedules due for a refresh"); + let schedules_collection = worker.device_schedule_collection()?; + let interval_seconds = worker + .config + .device_schedule_refresh_interval_seconds + .as_ref() + .copied() + .unwrap_or(SCHEDULE_REFRESH_SECONDS); + let cutoff = chrono::Utc::now() - .checked_sub_signed(chrono::Duration::seconds(SCHEDULE_REFRESH_SECONDS)) + .checked_sub_signed(chrono::Duration::seconds(interval_seconds)) .ok_or_else(|| anyhow::Error::msg("unable to create cutoff date for device schedule refresh"))? .timestamp_millis(); @@ -226,25 +227,69 @@ async fn check_schedules(worker: &mut super::worker::WorkerHandle<'_>) -> anyhow ) .await?; - log::debug!("queried device schedules with cutoff"); + log::trace!("queried device schedules with cutoff - {cutoff}"); + let mut nonce_updates = vec![]; + while let Some(handle_result) = async_std::stream::StreamExt::next(&mut cursor).await { - log::debug!("found schedule needing refresh - {handle_result:?}"); - let device_id = match handle_result { + log::trace!("found schedule needing refresh - {handle_result:?}"); + + let schedule = match handle_result { Err(error) => { - log::warn!("strange device schedule problem - {error}"); + log::error!("strange device schedule problem - {error}"); continue; } - Ok(schedule) => schedule.device_id, + Ok(schedule) => schedule, }; + let device_id = schedule.device_id; + + match (schedule.refresh_nonce, schedule.latest_refresh_nonce) { + (Some(current), Some(latest)) if current == latest => {} + (None, None) => { + log::warn!("schedule['{device_id}'] no previous nonce, setting now"); + } + _ => { + log::trace!("schedule['{device_id}'] will be handled by next execution job"); + continue; + } + } + + let new_nonce = uuid::Uuid::new_v4().to_string(); + log::info!("schedule['{device_id}'] device is ready for refresh -> {new_nonce}"); + nonce_updates.push((device_id.clone(), new_nonce.clone())); + if let Err(error) = worker - .enqueue_kind(super::RegistrarJobKind::RunDeviceSchedule(device_id)) + .enqueue_kind(super::RegistrarJobKind::RunDeviceSchedule { + device_id, + refresh_nonce: Some(new_nonce), + }) .await { log::error!("unable to queue device schedule execution job - {error}"); } } + let schedules = worker.mongo.schedules_collection(); + for (device_id, new_nonce) in nonce_updates { + let result = schedules + .find_one_and_update( + bson::doc! { "device_id": &device_id }, + bson::doc! 
{ "$set": { "refresh_nonce": new_nonce } }, + mongodb::options::FindOneAndUpdateOptions::builder() + .return_document(mongodb::options::ReturnDocument::After) + .build(), + ) + .await; + + match result { + Err(error) => { + log::error!("unable to update device '{device_id}' nonce - {error}"); + } + Ok(None) => log::error!("unable to find device '{device_id}'"), + Ok(Some(_)) => log::trace!("successfully updated device '{device_id}'"), + } + } + Ok(()) } diff --git a/src/beetle-srv/src/registrar/users.rs b/src/beetle-srv/src/registrar/users.rs index 7448204..8d9fd1c 100644 --- a/src/beetle-srv/src/registrar/users.rs +++ b/src/beetle-srv/src/registrar/users.rs @@ -32,6 +32,7 @@ where let day = chrono::Utc::now() .checked_add_signed(chrono::Duration::hours(24)) .unwrap_or_else(chrono::Utc::now); + let exp = day.timestamp() as usize; log::info!( diff --git a/src/beetle-srv/src/registrar/worker.rs b/src/beetle-srv/src/registrar/worker.rs index 2198a07..e8f2427 100644 --- a/src/beetle-srv/src/registrar/worker.rs +++ b/src/beetle-srv/src/registrar/worker.rs @@ -11,7 +11,7 @@ //! dumped into the `schedule` module adjacent to this. use super::{device_state, diagnostics, jobs, ownership, pool, rename, users, RegistrarJobKind}; -use crate::{config::RegistrarConfiguration, schema}; +use crate::{config::RegistrarConfiguration, reporting, schema}; use serde::Serialize; use std::io; @@ -21,7 +21,7 @@ use std::io; const DEFAULT_POOL_MINIMUM: u8 = 3; /// The most amount of jobs to try working at once. -const DEFAULT_JOB_BATCH_SIZE: u8 = 3; +const DEFAULT_JOB_BATCH_SIZE: u8 = 50; /// A wrapping container for our mongo types that provides the api for accessing collection. pub(super) struct WorkerMongo { @@ -47,6 +47,14 @@ impl WorkerMongo { Ok(Self { client: mongo, config }) } + + /// Returns the `mongodb` collection associated with our device schedule schema object. + pub(super) fn schedules_collection(&self) -> mongodb::Collection { + self + .client + .database(&self.config.database) + .collection(&self.config.collections.device_schedules) + } } /// This type provides the api that the worker "hands down" to the various functions it performs @@ -76,7 +84,7 @@ impl<'a> WorkerHandle<'a> { I: AsRef, S: Serialize, { - let (id, _) = crate::rendering::queue::Queue::new(self.redis) + let (id, _) = crate::rendering::queue::Queue::new(self.redis, &self.config.vendor_api_secret) .queue( device_id, &crate::rendering::QueuedRenderAuthority::Registrar, @@ -87,7 +95,8 @@ impl<'a> WorkerHandle<'a> { Ok(id) } - /// Actually creates the id we will be adding to our queue. + /// Actually creates the id we will be adding to our queue. This method is preferred to the + /// `enqueue` method, which expects the consumer to create the job id correctly. pub(super) async fn enqueue_kind(&mut self, job: super::RegistrarJobKind) -> io::Result { let id = uuid::Uuid::new_v4().to_string(); let job = super::RegistrarJob { id: id.clone(), job }; @@ -97,7 +106,7 @@ impl<'a> WorkerHandle<'a> { /// This function can be used by job processing functionality to "percolate" additional jobs /// back onto the queue. Such is the case for scheduled access token refreshes. - pub(super) async fn enqueue(&mut self, job: super::RegistrarJob) -> io::Result<()> { + async fn enqueue(&mut self, job: super::RegistrarJob) -> io::Result<()> { let id = job.id.clone(); let serialized = job.encrypt(self.config)?; @@ -161,14 +170,21 @@ impl<'a> WorkerHandle<'a> { pub struct Worker { /// The redis configuration. 
pub(super) redis: crate::config::RedisConfiguration, + /// The TCP connection we have to our redis host, if we currently have one. pub(super) connection: Option, + /// The mongo client + configuration pub(super) mongo: WorkerMongo, + /// Configuration specific to this worker. pub(super) config: RegistrarConfiguration, + /// Configuration for google apis. pub(super) google: crate::config::GoogleConfiguration, + + /// The handle for our reporting worker. + pub(super) reporting: Option>, } impl Worker { @@ -180,6 +196,7 @@ impl Worker { self.connection = match stream { None => { log::info!("no previous connection, attempting to connect now"); + crate::redis::connect(&self.redis) .await .map_err(|error| { @@ -190,7 +207,7 @@ impl Worker { } Some(mut redis_connection) => { - log::trace!("active redis connection, checking pool"); + let pending_job_count = self.report(&mut redis_connection).await.unwrap_or(1); // Attempt to fill our id pool if necessary. let amount = pool::fill_pool( @@ -205,32 +222,60 @@ impl Worker { // Attempt to mark all devices that have submitted an incoming ping since our last attempt // as active in our diagnostic collection. + let mut ingested_count = 0u16; for i in 0..self.config.active_device_chunk_size { log::trace!("checking active device queue"); let amount = diagnostics::mark_active(self, &mut redis_connection).await?; + ingested_count += amount as u16; if amount == 0 { - log::info!("no remaining active devices heard from after {i}"); + log::trace!("no remaining active devices heard from after {i}"); break; } } + if let Some(sink) = self.reporting.as_ref() { + let _ = sink + .send(reporting::Event::DeviceDiganosticBatchIngested { + device_count: ingested_count, + }) + .await; + } + if let Err(error) = super::schedule::check_schedule(self.handle(&mut redis_connection)).await { log::error!("failed scheduled registrar workflow - {error}"); } - let mut job_count = 0u8; - while job_count < DEFAULT_JOB_BATCH_SIZE { - log::info!("attempting to run job attempt #{job_count}"); - - job_count += match work_jobs(self, &mut redis_connection).await { - Err(error) => { - log::error!("registar job worker failed - {error}"); - DEFAULT_JOB_BATCH_SIZE - } - Ok(amt) if amt == 0 => DEFAULT_JOB_BATCH_SIZE, - Ok(amt) => amt, - }; + let mut processed_job_count = 0u8; + if pending_job_count > 0 { + log::trace!("attempting to process {pending_job_count} job(s)"); + while pending_job_count > 0 && processed_job_count < DEFAULT_JOB_BATCH_SIZE { + log::trace!("attempting to run job attempt #{processed_job_count}"); + + processed_job_count += match work_jobs(self, &mut redis_connection).await { + Err(error) => { + log::error!("registar job worker failed - {error}"); + break; + } + + // A none returned if there is nothing left for us to do, + Ok(None) => break, + + // Otherwise, we still have jobs (this one may have been ignored). + Ok(Some(amt)) => amt, + }; + } + } + + if let Some(sink) = self.reporting.as_ref() { + if let Err(error) = sink + .send(reporting::Event::JobBatchProcessed { + job_count: processed_job_count as u16, + }) + .await + { + log::error!("unable to send job batch processed event - {error}"); + } } Some(redis_connection) @@ -240,6 +285,34 @@ impl Worker { Ok(()) } + /// Reports queue metrics to the analytics configuration, if any. 
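
The `reporting` module consumed throughout this file (the `reporting` field above, the `reporting::Event` variants, and `crate::reporting::Worker::new` back in `registrar/mod.rs`) is not itself part of this diff. Based purely on those call sites, a minimal shape might look like the following; the channel choice and the body of `work` are assumptions:

    use async_std::channel::{unbounded, Receiver, Sender};

    /// Variant names and payloads mirror the call sites in this diff.
    #[derive(Debug)]
    pub(crate) enum Event {
      DeviceDiganosticBatchIngested { device_count: u16 },
      JobBatchProcessed { job_count: u16 },
      JobQueueLengthSample { queue_length: u16 },
    }

    pub(crate) struct Worker {
      receiver: Receiver<Event>,
      /// Retained for the real publisher; unused in this sketch.
      #[allow(dead_code)]
      config: crate::config::RegistrarAnalyticsConfiguration,
    }

    impl Worker {
      /// Returns the `(worker, sink)` pair that `Configuration::worker` splits with `.unzip()`.
      pub(crate) fn new(config: crate::config::RegistrarAnalyticsConfiguration) -> (Self, Sender<Event>) {
        let (sender, receiver) = unbounded();
        (Self { receiver, config }, sender)
      }

      /// Drains events until every sender is dropped; a real implementation would
      /// forward each event to the configured backend (e.g. the newrelic variant).
      pub(crate) async fn work(self) {
        while let Ok(event) = self.receiver.recv().await {
          log::trace!("reporting event received - {event:?}");
        }
      }
    }
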
+  async fn report(&self, redis: &mut crate::redis::RedisConnection) -> Option<u16> {
+    let sink = self.reporting.as_ref()?;
+
+    let queue_length = match kramer::execute(
+      redis,
+      kramer::Command::<&str, &str>::Lists(kramer::ListCommand::Len(crate::constants::REGISTRAR_JOB_QUEUE)),
+    )
+    .await
+    .map_err(|error| {
+      log::error!("unable to take length of job queue - {error}");
+    })
+    .ok()?
+    {
+      kramer::Response::Item(kramer::ResponseValue::Integer(value)) => value as u16,
+      other => {
+        log::warn!("strange response from job queue length sample - {other:?}");
+        0
+      }
+    };
+
+    if let Err(error) = sink.send(reporting::Event::JobQueueLengthSample { queue_length }).await {
+      log::error!("failed sending event to reporting queue - {error}");
+    }
+
+    Some(queue_length)
+  }
+
   /// Internally, this method is used to wrap our valid redis connection with other information
   /// that we will provide to the functions underneath us.
   fn handle<'a>(&'a mut self, redis: &'a mut crate::redis::RedisConnection) -> WorkerHandle<'a> {
@@ -252,12 +325,32 @@ impl Worker {
   }
 }

+/// This is abstracted from below so we can handle the first value in an array response, OR the
+/// only value in a string response the same way.
+fn decrypt_job<S>(worker: &mut Worker, value: S) -> io::Result<super::RegistrarJob>
+where
+  S: AsRef<str>,
+{
+  let key = jsonwebtoken::DecodingKey::from_secret(worker.config.vendor_api_secret.as_bytes());
+  let validation = jsonwebtoken::Validation::new(jsonwebtoken::Algorithm::HS256);
+
+  jsonwebtoken::decode::(value.as_ref(), &key, &validation)
+    .map_err(|error| {
+      log::error!("registrar worker unable to decode token - {}", error);
+      io::Error::new(io::ErrorKind::Other, "bad-jwt")
+    })
+    .map(|job_container| job_container.claims.job)
+}
+
 /// Attempts to pop and execute the next job available for us. This happens _outside_ our worker's
 /// `work` method so we can enforce that we have a valid redis connection to use, which is the
 /// primary function of the `work` method.
-async fn work_jobs(worker: &mut Worker, mut redis_connection: &mut crate::redis::RedisConnection) -> io::Result<u8> {
+async fn work_jobs(
+  worker: &mut Worker,
+  mut redis_connection: &mut crate::redis::RedisConnection,
+) -> io::Result<Option<u8>> {
   // Attempt to get the next job.
-  log::info!(
+  log::trace!(
     "attempting to pop next actual job from '{}'",
     crate::constants::REGISTRAR_JOB_QUEUE
   );
@@ -267,7 +360,7 @@ async fn work_jobs(worker: &mut Worker, mut redis_connection: &mut crate::redis:
     kramer::Command::Lists::<&str, &str>(kramer::ListCommand::Pop(
       kramer::Side::Left,
       crate::constants::REGISTRAR_JOB_QUEUE,
-      Some((None, 3)),
+      None,
    )),
  )
  .await?
@@ -278,140 +371,187 @@ async fn work_jobs(worker: &mut Worker, mut redis_connection: &mut crate::redis:
     .get(1)
     .and_then(|kind| match kind {
       kramer::ResponseValue::String(value) => Some(value),
-      _ => None,
+      other => {
+        log::error!("strange response from registrar job pop - {other:?}");
+        None
+      }
     })
     .and_then(|string| {
-      log::debug!("pulled encrypted job ({} chars)", string.len());
+      log::trace!("pulled encrypted job ({} chars)", string.len());
+      decrypt_job(worker, string).ok()
+    }),
+    kramer::Response::Item(kramer::ResponseValue::String(value)) => decrypt_job(worker, value).ok(),

-      let key = jsonwebtoken::DecodingKey::from_secret(worker.config.vendor_api_secret.as_bytes());
-      let validation = jsonwebtoken::Validation::new(jsonwebtoken::Algorithm::HS256);
+    // Fortunately, the queue will tell us that we're empty without having to check ourselves.
+ kramer::Response::Item(kramer::ResponseValue::Empty) => return Ok(None), - jsonwebtoken::decode::(string, &key, &validation) - .map_err(|error| { - log::error!("registrar worker unable to decode token - {}", error); - io::Error::new(io::ErrorKind::Other, "bad-jwt") - }) - .map(|job_container| job_container.claims.job) - .ok() - }), - _ => None, + other => { + log::error!("very strange response from registrar job pop - {other:?}"); + None + } }; - if let Some(job_container) = next_job { - let result = match &job_container.job { - RegistrarJobKind::MutateDeviceState(transition) => { - device_state::attempt_transition(worker.handle(redis_connection), transition) - .await - .map(|_| schema::jobs::JobResult::Success(schema::jobs::SuccessfulJobResult::Terminal)) - .map_err(|error| io::Error::new(io::ErrorKind::Other, error.to_string())) - } + let job_container = match next_job { + None => return Ok(Some(0)), + Some(job) => job, + }; - RegistrarJobKind::Renders(super::jobs::RegistrarRenderKinds::CurrentDeviceState(device_id)) => { - device_state::render_current(worker.handle(redis_connection), device_id) - .await - .map(|_| schema::jobs::JobResult::Success(schema::jobs::SuccessfulJobResult::Terminal)) - .map_err(|error| io::Error::new(io::ErrorKind::Other, error.to_string())) - } + log::trace!( + "jobType[{:?}] is now processing", + std::mem::discriminant(&job_container.job) + ); - RegistrarJobKind::UserAccessTokenRefresh { handle, user_id } => { - users::process_access_token(worker, handle, user_id) - .await - .map(|_| schema::jobs::JobResult::Success(schema::jobs::SuccessfulJobResult::Terminal)) - } + let mut returned_state = Some(1); - // Process requests-for-render-request jobs. This is a bit odd since we already have the - // renderer jobs too, but is helpful for providing easier ergonomics into sending device - // registration qr codes. - RegistrarJobKind::Renders(jobs::RegistrarRenderKinds::RegistrationScannable(device_id)) => { - log::info!("sending initial scannable link to device '{device_id}'"); - let mut initial_url = http_types::Url::parse(&worker.config.initial_scannable_addr).map_err(|error| { - log::warn!("unable to create initial url for device - {error}"); - io::Error::new(io::ErrorKind::Other, format!("{error}")) - })?; - - // scope our mutable borrow/mutation so it is dropped before we take ownship when we - // `to_string` it onto our layout. 
-      {
-        let mut query = initial_url.query_pairs_mut();
-        query.append_pair("device_target_id", device_id);
-      }
+  let result = match &job_container.job {
+    RegistrarJobKind::MutateDeviceState(transition) => {
+      log::info!(
+        "job[{}] processing render state transition for '{}'",
+        job_container.id,
+        transition.device_id
+      );
+
+      device_state::attempt_transition(worker.handle(redis_connection), transition)
+        .await
+        .map(|_| schema::jobs::JobResult::Success(schema::jobs::SuccessfulJobResult::Terminal))
+        .map_err(|error| io::Error::new(io::ErrorKind::Other, error.to_string()))
+    }

-      let mut queue = crate::rendering::queue::Queue::new(redis_connection);
-      let layout = crate::rendering::RenderVariant::scannable(initial_url.to_string());
-      let job_result = queue
-        .queue(&device_id, &crate::rendering::QueuedRenderAuthority::Registrar, layout)
-        .await;
+    RegistrarJobKind::Renders(super::jobs::RegistrarRenderKinds::CurrentDeviceState(device_id)) => {
+      log::debug!(
+        "job[{}] processing current device state render request for '{device_id}'",
+        job_container.id
+      );

-      job_result.map(|_| schema::jobs::JobResult::Success(schema::jobs::SuccessfulJobResult::Terminal))
-    }
+      device_state::render_current(worker.handle(redis_connection), device_id)
+        .await
+        .map(|_| schema::jobs::JobResult::Success(schema::jobs::SuccessfulJobResult::Terminal))
+        .map_err(|error| io::Error::new(io::ErrorKind::Other, error.to_string()))
+    }

-    RegistrarJobKind::RunDeviceSchedule(device_id) => {
-      log::info!("immediately executing device schedule for '{device_id}'");
+    RegistrarJobKind::UserAccessTokenRefresh { handle, user_id } => {
+      log::info!(
+        "job[{}] processing new token refresh for '{}'",
+        job_container.id,
+        user_id
+      );
+      users::process_access_token(worker, handle, user_id)
+        .await
+        .map(|_| schema::jobs::JobResult::Success(schema::jobs::SuccessfulJobResult::Terminal))
+    }

-      super::device_schedule::execute(worker.handle(redis_connection), &device_id)
-        .await
-        .map_err(|err| io::Error::new(io::ErrorKind::Other, err.to_string()))
-        .map(|_| schema::jobs::JobResult::Success(schema::jobs::SuccessfulJobResult::Terminal))
+    // Process requests-for-render-request jobs. This is a bit odd since we already have the
+    // renderer jobs too, but is helpful for providing easier ergonomics when sending device
+    // registration qr codes.
+    RegistrarJobKind::Renders(jobs::RegistrarRenderKinds::RegistrationScannable(device_id)) => {
+      log::info!(
+        "job[{}] sending initial scannable link to device '{device_id}'",
+        job_container.id
+      );
+
+      let mut initial_url = http_types::Url::parse(&worker.config.initial_scannable_addr).map_err(|error| {
+        log::warn!("unable to create initial url for device - {error}");
+        io::Error::new(io::ErrorKind::Other, format!("{error}"))
+      })?;
+
+      // scope our mutable borrow/mutation so it is dropped before we take ownership when we
+      // `to_string` it onto our layout.
+      {
+        let mut query = initial_url.query_pairs_mut();
+        query.append_pair("device_target_id", device_id);
+      }

-    RegistrarJobKind::ToggleDefaultSchedule {
-      user_id,
-      device_id,
-      should_enable,
-    } => {
-      log::info!("toggling default device schedule for device '{device_id}' to user '{user_id}' ({should_enable})");
+      let mut queue = crate::rendering::queue::Queue::new(redis_connection, &worker.config.vendor_api_secret);
+      let layout = crate::rendering::RenderVariant::scannable(initial_url.to_string());
+      let job_result = queue
+        .queue(&device_id, &crate::rendering::QueuedRenderAuthority::Registrar, layout)
+        .await;
+
+      job_result.map(|_| schema::jobs::JobResult::Success(schema::jobs::SuccessfulJobResult::Terminal))
+    }

-      super::device_schedule::toggle(worker.handle(redis_connection), device_id, user_id, *should_enable)
+    RegistrarJobKind::RunDeviceSchedule {
+      device_id,
+      refresh_nonce,
+    } => {
+      log::info!(
+        "job[{}] immediately executing device schedule for '{device_id}'",
+        job_container.id
+      );
+
+      let execution_result =
+        super::device_schedule::execute(worker.handle(redis_connection), &device_id, refresh_nonce.as_ref())
          .await
-        .map(|_| schema::jobs::JobResult::Success(schema::jobs::SuccessfulJobResult::Terminal))
-    }
+          .map_err(|err| io::Error::new(io::ErrorKind::Other, err.to_string()));

-    // Process device rename requests.
-    RegistrarJobKind::Rename(request) => {
-      log::info!("device rename request being processed - {request:?}");
-      let job_result = rename::rename(worker, request).await;
-      job_result.map(|_| schema::jobs::JobResult::Success(schema::jobs::SuccessfulJobResult::Terminal))
+      // A bit of special casing here: since we may be dealing with stale attempts to run a
+      // device schedule, we want to return a `Some(0)` if this job was unprocessable.
+      if let Ok(None) = execution_result {
+        returned_state = Some(0);
      }

-    // Process device ownership change requests.
-    RegistrarJobKind::OwnershipChange(request) => {
-      let job_result = ownership::process_change(worker, request).await;
-      job_result.map(|_| schema::jobs::JobResult::Success(schema::jobs::SuccessfulJobResult::Terminal))
-    }
+      execution_result.map(|_| schema::jobs::JobResult::Success(schema::jobs::SuccessfulJobResult::Terminal))
+    }

-    // Process device ownership claiming requests.
-    RegistrarJobKind::Ownership(ownership_request) => {
-      log::debug!("registrar found next ownership claims job - '{ownership_request:?}'");
-      let job_result = ownership::register_device(worker, ownership_request).await;
-      log::debug!("registration result - {job_result:?}");
-      job_result.map(|_| schema::jobs::JobResult::Success(schema::jobs::SuccessfulJobResult::Terminal))
-    }
-  };
+    RegistrarJobKind::ToggleDefaultSchedule {
+      user_id,
+      device_id,
+      should_enable,
+    } => {
+      log::info!(
+        "job[{}] toggling default device schedule for device '{device_id}' to user '{user_id}' ({should_enable})",
+        job_container.id
+      );
+
+      super::device_schedule::toggle(worker.handle(redis_connection), device_id, user_id, *should_enable)
+        .await
+        .map(|_| schema::jobs::JobResult::Success(schema::jobs::SuccessfulJobResult::Terminal))
+    }

-  let serialized_result = match result {
-    Ok(c) => serde_json::to_string(&c),
-    Err(c) => {
-      log::error!("job failure - {c:?}, recording!");
-      serde_json::to_string(&schema::jobs::JobResult::Failure(c.to_string()))
-    }
+    // Process device rename requests.
+ RegistrarJobKind::Rename(request) => { + log::info!("device rename request being processed - {request:?}"); + let job_result = rename::rename(worker, request).await; + job_result.map(|_| schema::jobs::JobResult::Success(schema::jobs::SuccessfulJobResult::Terminal)) } - .map_err(|error| { - log::error!("Unable to serialize job result - {error}"); - io::Error::new(io::ErrorKind::Other, format!("job-result-serialization - {error}")) - })?; - kramer::execute( - &mut redis_connection, - kramer::Command::Hashes(kramer::HashCommand::Set( - crate::constants::REGISTRAR_JOB_RESULTS, - kramer::Arity::One((&job_container.id, serialized_result)), - kramer::Insertion::Always, - )), - ) - .await?; + // Process device ownership change requests. + RegistrarJobKind::OwnershipChange(request) => { + let job_result = ownership::process_change(worker, request).await; + job_result.map(|_| schema::jobs::JobResult::Success(schema::jobs::SuccessfulJobResult::Terminal)) + } - return Ok(1); + // Process device ownership claiming requests. + RegistrarJobKind::Ownership(ownership_request) => { + log::debug!("registrar found next ownership claims job - '{ownership_request:?}'"); + let job_result = ownership::register_device(worker, ownership_request).await; + log::debug!("registration result - {job_result:?}"); + job_result.map(|_| schema::jobs::JobResult::Success(schema::jobs::SuccessfulJobResult::Terminal)) + } + }; + + let serialized_result = match result { + Ok(job_result) => serde_json::to_string(&job_result), + Err(job_error) => { + log::error!("job failure - {job_error:?}, recording!"); + serde_json::to_string(&schema::jobs::JobResult::Failure(job_error.to_string())) + } } + .map_err(|error| { + log::error!("Unable to serialize job result - {error}"); + io::Error::new(io::ErrorKind::Other, format!("job-result-serialization - {error}")) + })?; + + kramer::execute( + &mut redis_connection, + kramer::Command::Hashes(kramer::HashCommand::Set( + crate::constants::REGISTRAR_JOB_RESULTS, + kramer::Arity::One((&job_container.id, serialized_result)), + kramer::Insertion::Always, + )), + ) + .await?; - Ok(0) + Ok(returned_state) } diff --git a/src/beetle-srv/src/rendering/mod.rs b/src/beetle-srv/src/rendering/mod.rs index c65e916..36b39e9 100644 --- a/src/beetle-srv/src/rendering/mod.rs +++ b/src/beetle-srv/src/rendering/mod.rs @@ -101,17 +101,7 @@ where } Self::Split(SplitLayout { left, right, ratio }) => { - let left_max = match ratio { - 25 => dimensions.0 / 4, - 33 => dimensions.0 / 3, - 50 => dimensions.0 / 2, - 66 => dimensions.0 - (dimensions.0 / 3), - 75 => dimensions.0 - (dimensions.0 / 4), - 80 => dimensions.0 - (dimensions.0 / 5), - // TODO: support more breakpoints, or do actual math. This is just implemented this way - // for quick, strict support. - _ => dimensions.0 / 2, - }; + let left_max = (dimensions.0 as f32 * (ratio as f32 / 100f32)).round() as u32; match left { SplitContents::Messages(messages) => { diff --git a/src/beetle-srv/src/rendering/queue.rs b/src/beetle-srv/src/rendering/queue.rs index 4b403dd..43a1217 100644 --- a/src/beetle-srv/src/rendering/queue.rs +++ b/src/beetle-srv/src/rendering/queue.rs @@ -31,11 +31,25 @@ pub struct QueuedRender { pub(super) device_id: String, } +/// A wrapping type that will be encrypted when pushed into redis. +#[derive(Deserialize, Debug, Serialize)] +#[serde(rename_all = "snake_case")] +pub(super) struct QueuedRenderEncrypted { + /// The exp field used by jwt. + pub(super) exp: u32, + + /// The inner job type. 
+  pub(super) job: QueuedRender,
+}
+
 /// A type that wraps a connection and provides everything we need to add messages to our rendering
 /// queue.
 pub struct Queue<'a, C> {
   /// The underlying connection to redis.
   connection: &'a mut C,
+
+  /// The secret being used to encrypt queued jobs.
+  secret: &'a String,
 }

 impl<'a, C> Queue<'a, C>
@@ -43,8 +57,8 @@ where
   C: async_std::io::Read + async_std::io::Write + std::marker::Unpin,
 {
   /// Creates the new rendering queue around a connection.
-  pub fn new(connection: &'a mut C) -> Self {
-    Queue { connection }
+  pub fn new(connection: &'a mut C, secret: &'a String) -> Self {
+    Queue { connection, secret }
   }

   /// Creates a queued render, serializes it, and adds it to the redis list for popping later.
@@ -58,14 +72,28 @@ where
   S: AsRef<str>,
   T: Serialize,
 {
+  let id = uuid::Uuid::new_v4().to_string();
   let queued_item = QueuedRender {
-    id: uuid::Uuid::new_v4().to_string(),
+    id: id.clone(),
     layout,
     device_id: device_id.as_ref().to_string(),
     auth: auth.clone(),
   };

-  let json = serde_json::to_string(&queued_item)?;
-  log::info!("attempting to push a rendering request onto the queue for {json:?}");
+
+  // TODO(job_encryption): using jwt here for convenience, not because it is the best fit. The
+  // original intent in doing this was to avoid having plaintext in our redis messages.
+  // Leveraging an existing dependency like `aes-gcm` would be awesome.
+  let header = jsonwebtoken::Header::default();
+  let secret = jsonwebtoken::EncodingKey::from_secret(self.secret.as_bytes());
+  let exp = chrono::Utc::now()
+    .checked_add_signed(chrono::Duration::minutes(1440))
+    .unwrap_or_else(chrono::Utc::now)
+    .timestamp() as u32;
+  let json = jsonwebtoken::encode(&header, &QueuedRenderEncrypted { exp, job: queued_item }, &secret)
+    .map_err(|error| io::Error::new(io::ErrorKind::Other, format!("unable to encrypt job '{id}' - {error}")))?;
+
+  log::info!("pushing render '{id}' into rendering queue");
+
   let res = kramer::execute(
     &mut self.connection,
     kramer::Command::Lists(kramer::ListCommand::Push(
@@ -74,16 +102,20 @@ where
       kramer::Arity::One(&json),
    )),
   )
-  .await?;
+  .await
+  .map_err(|error| {
+    log::error!("unable to push render job '{id}' into queue - {error:?}");
+    error
+  })?;

   match res {
     kramer::Response::Item(kramer::ResponseValue::Integer(amount)) => {
-      log::info!("rendering request queued. current queue size {amount}");
-      Ok((queued_item.id, amount))
+      log::info!("rendering request '{id}' queued. current queue size {amount}");
+      Ok((id, amount))
     }
     other => Err(io::Error::new(
       io::ErrorKind::Other,
-      format!("strange response from queue attempt - {other:?}"),
+      format!("strange response from queue '{id}' attempt - {other:?}"),
    )),
   }
 }

diff --git a/src/beetle-srv/src/rendering/renderer.rs b/src/beetle-srv/src/rendering/renderer.rs
index f20d92e..16e2729 100644
--- a/src/beetle-srv/src/rendering/renderer.rs
+++ b/src/beetle-srv/src/rendering/renderer.rs
@@ -1,3 +1,4 @@
+use super::queue;
 use crate::{registrar, schema};
 use std::io;

@@ -59,6 +60,12 @@ impl Worker {
       Some((None, 5)),
     ));

+    // TODO(job_encryption): using jwt here for convenience, not because it is the best fit. The
+    // original intent in doing this was to avoid having plaintext in our redis messages.
+    // Leveraging an existing dependency like `aes-gcm` would be awesome.
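// As an aside: a rough sketch of what the `aes-gcm` route mentioned in the TODO above could
// look like. This is a hypothetical helper, not part of this change; it assumes a 32-byte
// key and a 12-byte nonce stored alongside the ciphertext.
//
//   use aes_gcm::aead::{Aead, KeyInit};
//   use aes_gcm::{Aes256Gcm, Nonce};
//
//   fn decrypt_payload(key_bytes: &[u8], nonce_bytes: &[u8], ciphertext: &[u8]) -> std::io::Result<Vec<u8>> {
//     let cipher = Aes256Gcm::new_from_slice(key_bytes)
//       .map_err(|error| std::io::Error::new(std::io::ErrorKind::Other, format!("bad key - {error}")))?;
//
//     // Decryption fails if the ciphertext or its authentication tag was tampered with.
//     cipher
//       .decrypt(Nonce::from_slice(nonce_bytes), ciphertext)
//       .map_err(|error| std::io::Error::new(std::io::ErrorKind::Other, format!("decryption failed - {error}")))
//   }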
+    let key = jsonwebtoken::DecodingKey::from_secret(self.config.0.registrar.vendor_api_secret.as_bytes());
+    let validation = jsonwebtoken::Validation::new(jsonwebtoken::Algorithm::HS256);
+
     let payload = match kramer::execute(&mut c, cmd).await {
       Err(error) => {
         log::warn!("nuking redis connection; failed pop execution - {error}");
@@ -71,22 +78,10 @@ impl Worker {
       Ok(kramer::Response::Item(kramer::ResponseValue::String(payload))) => {
         log::debug!("found payload - '{payload}'");
-        serde_json::from_str::>(payload.as_str())
-          .map_err(|error| {
-            log::warn!("unable to deserialize queued item - {error}");
-            error
-          })
-          .ok()
+        Some(payload)
       }
       Ok(kramer::Response::Array(contents)) => match contents.get(1) {
-        Some(kramer::ResponseValue::String(payload)) => {
-          serde_json::from_str::>(payload.as_str())
-            .map_err(|error| {
-              log::warn!("unable to deserialize queued item - {error}");
-              error
-            })
-            .ok()
-        }
+        Some(kramer::ResponseValue::String(payload)) => Some(payload.clone()),
         other => {
           log::warn!("strange response from rendering queue pop - {other:?}");
           None
@@ -96,13 +91,29 @@ impl Worker {
         log::warn!("strange response from rendering queue pop - {other:?}");
         None
       }
-    };
+    }
+    .and_then(|response_string| {
+      jsonwebtoken::decode::>(&response_string, &key, &validation)
+        .map_err(|error| {
+          log::error!("renderer worker unable to decode token - {}", error);
+          io::Error::new(io::ErrorKind::Other, "bad-jwt")
+        })
+        .ok()
+    });

-    if let Some(queued_render) = payload {
-      log::info!("found render, rasterizing + publish to '{}'", queued_render.device_id);
+    if let Some(queued_render) = payload.map(|p| p.claims.job) {
+      log::info!(
+        "found render '{}', rasterizing + publish to '{}'",
+        queued_render.id,
+        queued_render.device_id
+      );
       let queue_id = crate::redis::device_message_queue_id(&queued_render.device_id);

+      if let Err(error) = self.clear_pending(&mut c, &queue_id).await {
+        log::error!("unable to clear stale renders for '{queue_id}' - {error:?}");
+      }
+
       // Actually attempt to rasterize the layout into bytes and send it along to the device via
       // the device redis queue.
       let queue_error = match self.send_layout(&mut c, &queue_id, queued_render.layout.clone()).await {
@@ -117,6 +128,33 @@ impl Worker {
           io::Error::new(io::ErrorKind::Other, "serialization error".to_string())
         })?;

+      match histories
+        .find_one_and_update(
+          bson::doc! { "device_id": &queued_render.device_id },
+          bson::doc! { "$push": { "render_history": { "$each": [ ], "$slice": -10 } } },
+          mongodb::options::FindOneAndUpdateOptions::builder()
+            .upsert(true)
+            .return_document(mongodb::options::ReturnDocument::After)
+            .build(),
+        )
+        .await
+      {
+        Err(error) => {
+          log::warn!(
+            "render[{}] unable to truncate device '{}' history - {error}",
+            queued_render.id,
+            queued_render.device_id
+          );
+        }
+        Ok(_) => {
+          log::info!(
+            "render[{}] truncated render history of device '{}' successfully",
+            queued_render.id,
+            queued_render.device_id
+          );
+        }
+      }
+
       if let Err(error) = histories
         .find_one_and_update(
           bson::doc! { "device_id": &queued_render.device_id },
@@ -128,7 +166,11 @@ impl Worker {
         )
         .await
       {
-        log::warn!("unable to update device diagnostic total message count - {error}");
+        log::warn!(
+          "render[{}] unable to update device '{}' message history - {error}",
+          queued_render.id,
+          queued_render.device_id
+        );
       }

       // Lastly, update our job results hash with an entry for this render attempt. This is how
@@ -142,6 +184,13 @@ impl Worker {
           log::warn!("unable to complete serialization of render result - {error}");
           io::Error::new(io::ErrorKind::Other, "result-failure")
         })?;
+
+      log::info!(
+        "render[{}] setting job result for device '{}' - '{serialized_result}'",
+        queued_render.id,
+        queued_render.device_id
+      );
+
       if let Err(error) = kramer::execute(
         &mut c,
         kramer::Command::Hashes(kramer::HashCommand::Set(
@@ -155,7 +204,7 @@ impl Worker {
         log::warn!("unable to update job result - {error}");
       }

-      log::info!("mongo diagnostics updated for '{}'", queued_render.device_id);
+      log::info!("job '{}' for '{}' complete", queued_render.id, queued_render.device_id);
     }

     self.connections.1 = Some(c);
@@ -221,18 +270,51 @@ impl Worker {

     Ok(())
   }
+
+  /// Given a queue id, this method removes everything pending in it. It checks the length first,
+  /// which is nice for logging purposes.
+  async fn clear_pending(
+    &mut self,
+    mut connection: &mut crate::redis::RedisConnection,
+    queue_id: &str,
+  ) -> io::Result<()> {
+    log::info!("clearing all pending renders for '{queue_id}'");
+    let len = kramer::Command::<&str, &str>::Lists(kramer::ListCommand::Len(queue_id));
+    let res = kramer::execute(&mut connection, &len).await?;
+    let count = match res {
+      kramer::Response::Item(kramer::ResponseValue::Integer(i)) => i,
+      other => {
+        return Err(io::Error::new(
+          io::ErrorKind::Other,
+          format!("invalid len response of render queue '{queue_id}' - {other:?}"),
+        ))
+      }
+    };
+
+    if count <= 0 {
+      log::info!("queue '{queue_id}' had {count} stale messages, ignoring");
+      return Ok(());
+    }
+
+    log::info!("queue '{queue_id}' has {count} stale messages, deleting");
+    let del = kramer::Command::<&str, &str>::Lists(kramer::ListCommand::Trim(queue_id, count, 0));
+
+    kramer::execute(connection, &del).await.map(|_| ()).map_err(|error| {
+      io::Error::new(
+        io::ErrorKind::Other,
+        format!("failed deletion of stale messages on '{queue_id}' - {error:?}"),
      )
+    })
+  }
 }

 /// The main entrypoint for our renderers.
 pub async fn run(config: crate::registrar::Configuration) -> io::Result<()> {
   let mut tick = 0u8;
-  let mut interval = async_std::stream::interval(std::time::Duration::from_secs(1));
   let mut worker = Worker::new(config).await?;

   log::info!("renderer running");

   loop {
-    async_std::stream::StreamExt::next(&mut interval).await;
-
     if let Err(error) = worker.tick().await {
       log::warn!("worker failed on tick {tick} - {error}");
     }

diff --git a/src/beetle-srv/src/reporting.rs b/src/beetle-srv/src/reporting.rs
new file mode 100644
index 0000000..3980f2b
--- /dev/null
+++ b/src/beetle-srv/src/reporting.rs
@@ -0,0 +1,182 @@
+//! This module contains the code related to useful runtime metrics that can be reported to third
+//! party analytics platforms like newrelic.
+
+use serde::Serialize;
+use std::io;
+
+/// The event name in newrelic for our queue health samples.
+const QUEUE_DIAGNOSTIC_SAMPLE_EVENT_NAME: &str = "registrarJobQueueSample";
+
+/// The event name in newrelic for our job processed counts.
+const JOB_BATCH_PROCESSED_EVENT_NAME: &str = "registrarJobBatchProcessed";
+
+/// The event name in newrelic for our device diagnostic ingestion batches.
+const DEVICE_DIAGNOSTIC_INGESTION_BATCH: &str = "deviceDiagnosticIngestionBatch";
+
+/// This is the enumerated type that holds all things sent to our third party analytics platform
+/// for platform health monitoring.
+#[derive(Serialize, Debug)]
+pub enum Event {
+  /// This event is submitted by the registrar whenever it hears from devices on the incoming
+  /// queue.
+  DeviceDiagnosticBatchIngested {
+    /// The amount of devices processed.
+    device_count: u16,
+  },
+  /// When the registrar finishes working jobs up to the max batch amount, this event is sent.
+  JobBatchProcessed {
+    /// The amount of jobs processed.
+    job_count: u16,
+  },
+  /// The length of our job queue.
+  JobQueueLengthSample {
+    /// The length of our job queue.
+    queue_length: u16,
+  },
+}
+
+/// The worker that holds a configuration and works off a channel, sending events along to some
+/// endpoint.
+pub struct Worker {
+  /// The configuration.
+  config: crate::config::RegistrarAnalyticsConfiguration,
+
+  /// The receiving side of our event queue.
+  receiver: async_std::channel::Receiver<Event>,
+}
+
+/// The container of our analytic event.
+#[derive(Debug, serde::Serialize)]
+struct DeviceDiagnosticBatchIngested {
+  /// The amount of devices processed.
+  #[serde(rename = "device_count")]
+  device_count: u16,
+
+  /// The constant event name for this payload.
+  #[serde(rename = "eventType")]
+  event_type: &'static str,
+}
+
+/// The container of our analytic event.
+#[derive(Debug, serde::Serialize)]
+struct BatchProcessedSample {
+  /// The amount of jobs processed.
+  #[serde(rename = "job_count")]
+  job_count: u16,
+
+  /// The constant event name for this payload.
+  #[serde(rename = "eventType")]
+  event_type: &'static str,
+}
+
+/// The container of our analytic event.
+#[derive(Debug, serde::Serialize)]
+struct QueueDiagnosticSample {
+  /// The length of our job queue.
+  #[serde(rename = "queue_length")]
+  queue_length: u16,
+
+  /// The constant event name for this payload.
+  #[serde(rename = "eventType")]
+  event_type: &'static str,
+}
+
+impl Worker {
+  /// The constructor.
+  pub fn new(config: crate::config::RegistrarAnalyticsConfiguration) -> (Self, async_std::channel::Sender<Event>) {
+    let (sender, receiver) = async_std::channel::unbounded();
+    (Self { config, receiver }, sender)
+  }
+
+  /// The main, async entrypoint for our reporting worker. It is meant to be pretty basic, we're
+  /// just taking the next item from a channel and doing something with it based on our
+  /// configuration.
+  pub async fn work(self) -> io::Result<()> {
+    loop {
+      if self.receiver.is_closed() {
+        return Err(io::Error::new(io::ErrorKind::Other, "reporting queue closed"));
+      }
+
+      let event = self.receiver.recv().await.map_err(|error| {
+        io::Error::new(
+          io::ErrorKind::Other,
+          format!("failed taking next reporting event - {error}"),
+        )
+      })?;
+
+      log::trace!("reporting has next event to send along - {event:?}");
+
+      let result = match (event, &self.config) {
+        (
+          Event::DeviceDiagnosticBatchIngested { device_count },
+          crate::config::RegistrarAnalyticsConfiguration::NewRelic { account_id, api_key },
+        ) => {
+          self
+            .send_newrelic(
+              DeviceDiagnosticBatchIngested {
+                device_count,
+                event_type: DEVICE_DIAGNOSTIC_INGESTION_BATCH,
+              },
+              account_id,
+              api_key,
+            )
+            .await
+        }
+        (
+          Event::JobBatchProcessed { job_count },
+          crate::config::RegistrarAnalyticsConfiguration::NewRelic { account_id, api_key },
+        ) => {
+          self
+            .send_newrelic(
+              BatchProcessedSample {
+                job_count,
+                event_type: JOB_BATCH_PROCESSED_EVENT_NAME,
+              },
+              account_id,
+              api_key,
+            )
+            .await
+        }
+        (
+          Event::JobQueueLengthSample { queue_length },
+          crate::config::RegistrarAnalyticsConfiguration::NewRelic { account_id, api_key },
+        ) => {
+          let sample = QueueDiagnosticSample {
+            queue_length,
+            event_type: QUEUE_DIAGNOSTIC_SAMPLE_EVENT_NAME,
+          };
+
+          self.send_newrelic(&sample, account_id, api_key).await
+        }
+      };
+
+      if let Err(error) = result {
+        log::error!("reporting worker unable to send event - {error}");
+      }
+    }
+  }
+
+  /// Sends events along to the newrelic api.
+  async fn send_newrelic<T, S>(&self, data: T, account_id: S, api_key: S) -> io::Result<()>
+  where
+    T: serde::Serialize,
+    S: AsRef<str>,
+  {
+    surf::post(format!(
+      "https://insights-collector.newrelic.com/v1/accounts/{}/events",
+      account_id.as_ref()
+    ))
+    .header("Accept", "*/*")
+    .header("Api-Key", api_key.as_ref())
+    .body_json(&data)
+    .map_err(|error| {
+      io::Error::new(
+        io::ErrorKind::Other,
+        format!("unable to serialize analytics event - {error}"),
      )
+    })?
+    .await
+    .map(|_| ())
+    .map_err(|error| io::Error::new(io::ErrorKind::Other, error.to_string()))
+  }
+}

diff --git a/src/beetle-srv/src/schema/device_state.rs b/src/beetle-srv/src/schema/device_state.rs
index 8d7bae6..62bd75a 100644
--- a/src/beetle-srv/src/schema/device_state.rs
+++ b/src/beetle-srv/src/schema/device_state.rs
@@ -18,10 +18,19 @@ pub struct DeviceRenderingStateMessageEntry {
 #[serde(rename_all = "snake_case", tag = "beetle:kind", content = "beetle:content")]
 pub enum DeviceRenderingState {
   /// The layout of our state for rendering a calendar.
-  ScheduleLayout(Vec, Vec),
+  ScheduleLayout {
+    /// The latest list of events that will be rendered.
+    events: Vec,
+
+    /// The latest list of messages that will be rendered.
+    messages: Vec,
+  },

   /// Just a list of messages.
-  MessageList(Vec),
+  MessageList {
+    /// The list of messages.
+    messages: Vec,
+  },
 }

 /// This schema is the long-lived representation of what is being rendered to a device.
@@ -46,5 +55,8 @@ pub enum DeviceStateMessageOrigin {
   Unknown,

   /// This message came from a user. The string is their nickname.
-  User(String),
+  User {
+    /// The nickname of the user.
+    nickname: String,
+  },
 }

diff --git a/src/beetle-srv/src/schema/mod.rs b/src/beetle-srv/src/schema/mod.rs
index 578c638..aa78578 100644
--- a/src/beetle-srv/src/schema/mod.rs
+++ b/src/beetle-srv/src/schema/mod.rs
@@ -50,14 +50,30 @@ fn format_datetime(datetime: &chrono::DateTime) -> String {
 #[serde(rename_all = "snake_case", tag = "beetle:kind", content = "beetle:content")]
 pub enum DeviceAuthorityModel {
   /// When a device is in an exclusive authority model, only a single user can manage it.
-  Exclusive(String),
+  Exclusive {
+    /// The id of the owner.
+    owner: String,
+  },

   /// When a device is in a shared authority model, a list of users can manage it.
-  Shared(String, Vec),
+  Shared {
+    /// The original owner.
+    owner: String,
+
+    /// A list of other user ids that can manage this device.
+    guests: Vec<String>,
+  },

   /// When a device is in an "open" model, anyone can send things to it. We will retain the list of
   /// folks who have added themselves as a way to transition easily into "shared".
-  Public(String, Vec),
+  Public {
+    /// In this state, the owner is the only one who can toggle the ownership off of "public".
+    owner: String,
+
+    /// The guest list here is cosmetic; users in this list will be re-added to the shared list,
+    /// but otherwise are not special.
+    guests: Vec<String>,
+  },
 }

 /// The schema of our records that are stored in `device_histories` collection.
@@ -114,7 +130,10 @@ pub enum DeviceDiagnosticRegistration {
 pub enum DeviceScheduleKind {
   /// The most basic kind of schedule. The `String` held by this variant is the user id for whom we
   /// should fetch events.
-  UserEventsBasic(String),
+  UserEventsBasic {
+    /// The id of our user.
+    user_oid: String,
+  },
 }

 /// A schedule of things to render for a specific device.
@@ -127,6 +146,13 @@ pub struct DeviceSchedule {
   /// The timestamp of the last executed attempt.
   pub last_executed: Option,

+  /// A unique value that is updated when refreshes are queued and sent along with the job. If
+  /// multiple refresh jobs are queued in a row, ones that do not match are ignored.
+  pub refresh_nonce: Option<String>,
+
+  /// The latest refresh nonce. If this does not match a refresh job, it is ignored.
+  pub latest_refresh_nonce: Option<String>,
+
   /// The underlying schedule implementation.
   pub kind: Option<DeviceScheduleKind>,
 }

diff --git a/src/beetle-srv/src/vendor/google.rs b/src/beetle-srv/src/vendor/google.rs
index 987aa4a..0fd8826 100644
--- a/src/beetle-srv/src/vendor/google.rs
+++ b/src/beetle-srv/src/vendor/google.rs
@@ -143,8 +143,9 @@ pub struct TokenHandle {
 #[derive(Debug, Clone, Serialize, Deserialize)]
 #[serde(tag = "beetle:kind", content = "beetle:content")]
 pub enum ParsedEventTimeMarker {
-  /// A whole-day event.
+  /// A whole-day event. YYYY-MM-DD
   Date(u32, u32, u32),
+
   /// An event with a specific time.
DateTime(chrono::DateTime), } @@ -171,6 +172,7 @@ pub fn parse_event(event: &EventListEntry) -> anyhow::Result { match (date_container, datetime_container) { (Some((start, end)), None) => { log::trace!("found whole-day event {start} - {end}"); + let start = parse_event_date(start)?; let end = parse_event_date(end)?; @@ -183,6 +185,7 @@ pub fn parse_event(event: &EventListEntry) -> anyhow::Result { } (None, Some((start, end))) => { log::trace!("found timed event {start} - {end}"); + let start = chrono::DateTime::parse_from_rfc3339(start.as_str()).with_context(|| "invalid date")?; let end = chrono::DateTime::parse_from_rfc3339(end.as_str()).with_context(|| "invalid date")?; @@ -226,7 +229,7 @@ pub async fn fetch_primary(handle: &TokenHandle) -> anyhow::Result anyhow::Result anyhow::Result> { - log::debug!("fetching calendar '{}'", calendar.id); + log::trace!("fetching calendar '{}'", calendar.id); let mut uri = url::Url::parse( format!( " https://www.googleapis.com/calendar/v3/calendars/{}/events", @@ -273,7 +276,7 @@ pub async fn fetch_events(handle: &TokenHandle, calendar: &CalendarListEntry) -> let events = serde_json::from_str::(&body_string).with_context(|| "failed event list parse")?; - log::debug!("found {} items in calendar", events.items.len()); + log::trace!("found {} items in calendar", events.items.len()); Ok(events.items) } @@ -286,14 +289,15 @@ pub async fn fetch_user(handle: &TokenHandle) -> anyhow::Result { let mut res = surf::get(&url) .header("Authorization", format!("Bearer {}", handle.token.access_token)) .await - .map_err(|e| io::Error::new(io::ErrorKind::Other, e.to_string())) - .with_context(|| "profile request failed")?; - - let userinfo = res - .body_json::() - .await - .map_err(|e| io::Error::new(io::ErrorKind::Other, e.to_string())) - .with_context(|| "body read failed")?; + .map_err(|e| { + let error = format!("request for user info failed - {e:?}"); + io::Error::new(io::ErrorKind::Other, error) + })?; + + let userinfo = res.body_json::().await.map_err(|e| { + let error = format!("unable to parse user info payload - {e:?}"); + io::Error::new(io::ErrorKind::Other, error) + })?; log::trace!("profile - '{}'", userinfo.id); diff --git a/src/beetle-srv/src/vendor/mod.rs b/src/beetle-srv/src/vendor/mod.rs index e8f9904..76897d9 100644 --- a/src/beetle-srv/src/vendor/mod.rs +++ b/src/beetle-srv/src/vendor/mod.rs @@ -2,3 +2,6 @@ /// Google api functionality. pub mod google; + +/// Newrelic configuration. +pub mod newrelic; diff --git a/src/beetle-srv/src/vendor/newrelic.rs b/src/beetle-srv/src/vendor/newrelic.rs new file mode 100644 index 0000000..365be8b --- /dev/null +++ b/src/beetle-srv/src/vendor/newrelic.rs @@ -0,0 +1,11 @@ +use serde::Deserialize; + +/// Newrelic configuration. +#[derive(Debug, Deserialize, Clone)] +pub struct NewRelic { + /// The newrelic account id. + pub account_id: String, + + /// The newrelic license key. 
+ pub api_key: String, +} diff --git a/src/beetle-ui/elm.json b/src/beetle-ui/elm.json index 9792ce1..218f794 100644 --- a/src/beetle-ui/elm.json +++ b/src/beetle-ui/elm.json @@ -14,7 +14,8 @@ "elm/random": "1.0.0", "elm/time": "1.0.0", "elm/url": "1.0.0", - "hecrj/html-parser": "2.4.0" + "hecrj/html-parser": "2.4.0", + "rtfeldman/elm-iso8601-date-strings": "1.1.4" }, "indirect": { "elm/bytes": "1.0.8", diff --git a/src/beetle-ui/src/Environment.elm b/src/beetle-ui/src/Environment.elm index a904636..9201d0d 100644 --- a/src/beetle-ui/src/Environment.elm +++ b/src/beetle-ui/src/Environment.elm @@ -22,8 +22,11 @@ module Environment exposing import Browser.Navigation as Nav import Dict import Html +import Html.Attributes as A import Http +import Iso8601 as Date import Json.Decode +import Time import Url @@ -43,6 +46,7 @@ type alias Configuration = { api : String , apiDocsUrl : String , root : String + , version : String , loginUrl : String , logoutUrl : String , localization : List ( String, String ) @@ -164,6 +168,39 @@ statusDecoder = (Json.Decode.field "timestamp" Json.Decode.string) +formatVersionTime : Time.Posix -> String +formatVersionTime stamp = + let + zone = + Time.utc + in + String.join ":" + [ String.fromInt (Time.toHour zone stamp) |> String.padLeft 2 '0' + , String.fromInt (Time.toMinute zone stamp) |> String.padLeft 2 '0' + ] + ++ "UTC" + + +versionInfo : Environment -> StatusResponse -> Html.Html Message +versionInfo env response = + let + parsed = + Date.toTime response.timestamp + + timeString = + Result.map formatVersionTime parsed |> Result.withDefault response.timestamp + + apiVersion = + "api: " ++ String.slice 0 7 response.version + + uiVersion = + "ui: " ++ String.slice 0 7 env.configuration.version + in + Html.div + [ A.class "flex items-center" ] + (List.map (\t -> Html.span [ A.class "px-2 mx-1 block bg-neutral-900 rounded" ] [ Html.text t ]) [ apiVersion, uiVersion, timeString ]) + + statusFooter : Environment -> Html.Html Message statusFooter env = case env.status of @@ -173,7 +210,7 @@ statusFooter env = Html.div [] [ Html.text (String.concat [ "failed: ", error ]) ] Ok response -> - Html.div [] [ Html.text (String.concat [ String.slice 0 7 response.version, " @ ", response.timestamp ]) ] + Html.div [] [ versionInfo env response ] Nothing -> Html.div [] [ Html.text "Connecting..." ] diff --git a/src/beetle-ui/src/Route.elm b/src/beetle-ui/src/Route.elm index 630916a..d73966a 100644 --- a/src/beetle-ui/src/Route.elm +++ b/src/beetle-ui/src/Route.elm @@ -230,15 +230,14 @@ routeLoadedEnv env url maybeId = Redirect (Environment.buildRoutePath env "home") - --- Given a URL, try to match on the route. The first thing this will do is try to find an id --- associated with the session/environment and match on "authenticated" routes. - - fromUrl : Environment.Environment -> Url.Url -> RouteInitialization fromUrl env url = - Environment.getLoadedId env - |> Maybe.map (routeLoadedEnv env url) + -- This is a maybe of a maybe. We don't want to redirect if there is no session _yet_. 
+ let + maybeLoadedId = + Environment.getLoadedId env + in + Maybe.map (routeLoadedEnv env url) maybeLoadedId |> Maybe.withDefault (Matched ( Nothing, Cmd.none )) diff --git a/tools/beetle-mock/Cargo.toml b/tools/beetle-mock/Cargo.toml index fffc8ab..0b31f94 100644 --- a/tools/beetle-mock/Cargo.toml +++ b/tools/beetle-mock/Cargo.toml @@ -27,6 +27,3 @@ url = { version = "^2.3.1" } # Enabling this feature will allow developers to use a "naked" tcp stream for redis connections, instead of # the default, which is to use `async_tls`. redis-insecure = [] - -[patch.crates-io] -kramer = { git = "https://github.com/sizethree/kramer.git", branch = "async-read" } diff --git a/tools/beetle-mock/src/main.rs b/tools/beetle-mock/src/main.rs index 1206a82..aa635a9 100644 --- a/tools/beetle-mock/src/main.rs +++ b/tools/beetle-mock/src/main.rs @@ -421,6 +421,7 @@ impl iced::Application for BeetleMock { } fn update(&mut self, message: Self::Message) -> iced::Command { + #[allow(clippy::single_match)] match message { Self::Message::SetImage(buffer) => self.latest_image = Some(buffer), _ => (),