diff --git a/.github/workflows/js-checks.yml b/.github/workflows/js-checks.yml index 8ec9579c847..76c685fe8fb 100644 --- a/.github/workflows/js-checks.yml +++ b/.github/workflows/js-checks.yml @@ -117,6 +117,8 @@ jobs: - name: Build package and dependencies run: yarn build --filter "+${{ inputs.package }}" + env: + NODE_ENV: test - name: Build SDK run: yarn build --filter "+dash" diff --git a/.pnp.cjs b/.pnp.cjs index ed01f183c8e..e2bcb01108c 100755 --- a/.pnp.cjs +++ b/.pnp.cjs @@ -2916,6 +2916,7 @@ function $$SETUP_STATE(hydrateRuntimeState, basePath) { ["@dashevo/rs-drive", "workspace:packages/rs-drive-nodejs"],\ ["@dashevo/dashcore-lib", "https://github.com/dashevo/dashcore-lib.git#commit=3527419e8739b5e7d4017028d642dba8851c3e25"],\ ["@dashevo/dpp", "workspace:packages/js-dpp"],\ + ["@dashevo/withdrawals-contract", "workspace:packages/withdrawals-contract"],\ ["cargo-cp-artifact", "npm:0.1.6"],\ ["cbor", "npm:8.1.0"],\ ["chai", "npm:4.3.4"],\ diff --git a/Cargo.lock b/Cargo.lock index 2ce411ac9ed..ca9f7a106a3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -17,12 +17,6 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" -[[package]] -name = "ahash" -version = "0.3.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8fd72866655d1904d6b0997d0b07ba561047d070fbe29de039031c641b61217" - [[package]] name = "ahash" version = "0.7.6" @@ -205,7 +199,6 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "006cc91e1a1d99819bc5b8214be3555c1f0611b169f527a1fdc54ed1f2b745b0" dependencies = [ - "core2", "serde", ] @@ -348,18 +341,6 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "771fe0050b883fcc3ea2359b1a96bcfbc090b7116eae7c3c512c7a083fdf23d3" -[[package]] -name = "bstr" -version = "0.2.17" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba3569f383e8f1598449f1a423e72e99569137b47740b1da11ef19af3d5c3223" -dependencies = [ - "lazy_static", - "memchr", - "regex-automata", - "serde", -] - [[package]] name = "bumpalo" version = "3.12.0" @@ -493,6 +474,7 @@ dependencies = [ "js-sys", "num-integer", "num-traits", + "serde", "time 0.1.45", "wasm-bindgen", "winapi", @@ -587,15 +569,6 @@ version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc" -[[package]] -name = "core2" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "239fa3ae9b63c2dc74bd3fa852d4792b8b305ae64eeede946265b6af62f1fff3" -dependencies = [ - "memchr", -] - [[package]] name = "costs" version = "1.0.0" @@ -742,13 +715,12 @@ dependencies = [ [[package]] name = "csv" -version = "1.1.6" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22813a6dc45b335f9bade10bf7271dc477e81113e89eb251a0bc2a8a81c536e1" +checksum = "af91f40b7355f82b0a891f50e70399475945bb0b0da4f1700ce60761c9d3e359" dependencies = [ - "bstr", "csv-core", - "itoa 0.4.8", + "itoa", "ryu", "serde", ] @@ -816,16 +788,49 @@ dependencies = [ "syn", ] +[[package]] +name = "darling" +version = "0.14.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0808e1bd8671fb44a113a14e13497557533369847788fa2ae912b6ebfce9fa8" +dependencies = [ + "darling_core", + "darling_macro", +] + +[[package]] +name = "darling_core" +version = "0.14.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "001d80444f28e193f30c2f293455da62dcf9a6b29918a4253152ae2b1de592cb" +dependencies = [ + "fnv", + "ident_case", + "proc-macro2", + "quote", + "strsim", + "syn", +] + +[[package]] +name = "darling_macro" +version = "0.14.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"b36230598a2d5de7ec1c6f51f72d8a99a9208daff41de2084d06e3fd3ea56685" +dependencies = [ + "darling_core", + "quote", + "syn", +] + [[package]] name = "dashcore" version = "0.29.1" -source = "git+https://github.com/dashevo/rust-dashcore?branch=master#51548a4a1b9eca7430f5f3caf94d9784886ff2e9" +source = "git+https://github.com/dashevo/rust-dashcore?rev=51548a4a1b9eca7430f5f3caf94d9784886ff2e9#51548a4a1b9eca7430f5f3caf94d9784886ff2e9" dependencies = [ "anyhow", "bech32", "bitcoin_hashes 0.10.0", - "core2", - "hashbrown 0.8.2", "hex", "rustversion", "secp256k1", @@ -833,18 +838,26 @@ dependencies = [ ] [[package]] -name = "dashcore" -version = "0.29.1" -source = "git+https://github.com/dashpay/rust-dashcore?branch=master#51548a4a1b9eca7430f5f3caf94d9784886ff2e9" +name = "dashcore-rpc" +version = "0.15.0" +source = "git+https://github.com/jawid-h/rust-dashcore-rpc?branch=fix/attempt-to-fix#5dbc28583ef77d0b6eca3b3519448569bc41ce8d" dependencies = [ - "anyhow", - "bech32", - "bitcoin_hashes 0.10.0", - "core2", - "hashbrown 0.8.2", - "hex", - "rustversion", - "secp256k1", + "dashcore-rpc-json", + "jsonrpc", + "log", + "serde", + "serde_json", +] + +[[package]] +name = "dashcore-rpc-json" +version = "0.15.0" +source = "git+https://github.com/jawid-h/rust-dashcore-rpc?branch=fix/attempt-to-fix#5dbc28583ef77d0b6eca3b3519448569bc41ce8d" +dependencies = [ + "dashcore", + "serde", + "serde_json", + "serde_with", ] [[package]] @@ -925,7 +938,7 @@ dependencies = [ "byteorder", "chrono", "ciborium", - "dashcore 0.29.1 (git+https://github.com/dashevo/rust-dashcore?branch=master)", + "dashcore", "data-contracts", "env_logger", "futures", @@ -967,7 +980,7 @@ dependencies = [ "ciborium", "costs", "criterion", - "dashcore 0.29.1 (git+https://github.com/dashpay/rust-dashcore?branch=master)", + "dashcore", "dpp", "enum-map", "grovedb", @@ -977,6 +990,7 @@ dependencies = [ "intmap", "itertools", "lazy_static", + "mockall", "moka", "nohash-hasher", "rand", @@ -999,9 +1013,13 @@ dependencies = [ 
"bs58", "chrono", "ciborium", - "dashcore 0.29.1 (git+https://github.com/dashpay/rust-dashcore?branch=master)", + "dashcore", + "dashcore-rpc", + "dpp", "drive", "hex", + "itertools", + "mockall", "rand", "rust_decimal", "rust_decimal_macros", @@ -1124,9 +1142,9 @@ dependencies = [ [[package]] name = "fastrand" -version = "1.8.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7a407cfaa3385c4ae6b23e84623d48c2798d06e3e6a1878f7f59f17b3f86499" +checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be" dependencies = [ "instant", ] @@ -1158,6 +1176,12 @@ dependencies = [ "num-traits", ] +[[package]] +name = "fnv" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" + [[package]] name = "form_urlencoded" version = "1.1.0" @@ -1357,16 +1381,6 @@ version = "1.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eabb4a44450da02c90444cf74558da904edde8fb4e9035a9a6a4e15445af0bd7" -[[package]] -name = "hashbrown" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e91b62f79061a0bc2e046024cb7ba44b08419ed238ecbd9adbd787434b9e8c25" -dependencies = [ - "ahash 0.3.8", - "autocfg", -] - [[package]] name = "hashbrown" version = "0.12.3" @@ -1465,6 +1479,12 @@ dependencies = [ "cxx-build", ] +[[package]] +name = "ident_case" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" + [[package]] name = "idna" version = "0.3.0" @@ -1483,6 +1503,7 @@ checksum = "1885e79c1fc4b10f0e172c475f458b7f7b93061064d98c3293e98c5ba0c8b399" dependencies = [ "autocfg", "hashbrown 0.12.3", + "serde", ] [[package]] @@ -1527,12 +1548,6 @@ dependencies = [ "either", ] -[[package]] -name = "itoa" -version = "0.4.8" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4" - [[package]] name = "itoa" version = "1.0.5" @@ -1601,6 +1616,17 @@ dependencies = [ "url", ] +[[package]] +name = "jsonrpc" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "248ea3eab5d9a37ee324b1f576f19d274ff7e56cd5206ea4755a7ef918e94fa4" +dependencies = [ + "base64 0.13.1", + "serde", + "serde_json", +] + [[package]] name = "jsonschema" version = "0.15.0" @@ -1613,7 +1639,7 @@ dependencies = [ "fancy-regex", "fraction", "iso8601", - "itoa 1.0.5", + "itoa", "lazy_static", "memchr", "num-cmp", @@ -1811,14 +1837,14 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.5" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5d732bc30207a6423068df043e3d02e0735b155ad7ce1a6f76fe2baa5b158de" +checksum = "5b9d9a46eff5b4ff64b45a9e316a6d1e0bc719ef429cbec4dc630684212bfdf9" dependencies = [ "libc", "log", "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys 0.42.0", + "windows-sys 0.45.0", ] [[package]] @@ -2482,12 +2508,6 @@ dependencies = [ "regex-syntax", ] -[[package]] -name = "regex-automata" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" - [[package]] name = "regex-syntax" version = "0.6.28" @@ -2505,18 +2525,18 @@ dependencies = [ [[package]] name = "rend" -version = "0.3.6" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79af64b4b6362ffba04eef3a4e10829718a4896dac19daa741851c86781edf95" +checksum = "581008d2099240d37fb08d77ad713bcaec2c4d89d50b5b21a8bb1996bbab68ab" dependencies = [ "bytecheck", ] [[package]] name = "rkyv" -version = "0.7.39" +version = "0.7.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"cec2b3485b07d96ddfd3134767b8a447b45ea4eb91448d0a35180ec0ffd5ed15" +checksum = "c30f1d45d9aa61cbc8cd1eb87705470892289bb2d01943e7803b873a57404dc3" dependencies = [ "bytecheck", "hashbrown 0.12.3", @@ -2528,9 +2548,9 @@ dependencies = [ [[package]] name = "rkyv_derive" -version = "0.7.39" +version = "0.7.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6eaedadc88b53e36dd32d940ed21ae4d850d5916f2581526921f553a72ac34c4" +checksum = "ff26ed6c7c4dfc2aa9480b86a60e3c7233543a270a680e10758a507c5a4ce476" dependencies = [ "proc-macro2", "quote", @@ -2737,7 +2757,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cad406b69c91885b5107daf2c29572f6c8cdb3c66826821e286c533490c0bc76" dependencies = [ "indexmap", - "itoa 1.0.5", + "itoa", "ryu", "serde", ] @@ -2753,6 +2773,34 @@ dependencies = [ "syn", ] +[[package]] +name = "serde_with" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30d904179146de381af4c93d3af6ca4984b3152db687dacb9c3c35e86f39809c" +dependencies = [ + "base64 0.13.1", + "chrono", + "hex", + "indexmap", + "serde", + "serde_json", + "serde_with_macros", + "time 0.3.17", +] + +[[package]] +name = "serde_with_macros" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1966009f3c05f095697c537312f5415d1e3ed31ce0a56942bac4c771c5c335e" +dependencies = [ + "darling", + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "sha2" version = "0.9.9" @@ -2785,9 +2833,9 @@ checksum = "43b2853a4d09f215c24cc5489c992ce46052d359b5109343cbafbf26bc62f8a3" [[package]] name = "signal-hook-registry" -version = "1.4.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e51e73328dc4ac0c7ccbda3a494dfa03df1de2f46018127f60c693f2648455b0" +checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1" dependencies = [ "libc", ] @@ -2859,6 +2907,12 @@ dependencies = [ 
"visualize", ] +[[package]] +name = "strsim" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" + [[package]] name = "strum" version = "0.24.1" @@ -3030,6 +3084,7 @@ version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a561bf4617eebd33bca6434b988f39ed798e527f51a1e797d0ee4f61c0a38376" dependencies = [ + "itoa", "serde", "time-core", "time-macros", diff --git a/packages/js-drive/lib/abci/handlers/finalizeBlockHandlerFactory.js b/packages/js-drive/lib/abci/handlers/finalizeBlockHandlerFactory.js index bcbdd7d1f1c..1d3dfdaf4fb 100644 --- a/packages/js-drive/lib/abci/handlers/finalizeBlockHandlerFactory.js +++ b/packages/js-drive/lib/abci/handlers/finalizeBlockHandlerFactory.js @@ -6,6 +6,7 @@ const { }, }, } = require('@dashevo/abci/types'); + const lodashCloneDeep = require('lodash/cloneDeep'); /** @@ -13,24 +14,24 @@ const lodashCloneDeep = require('lodash/cloneDeep'); * @return {finalizeBlockHandler} * @param {GroveDBStore} groveDBStore * @param {BlockExecutionContextRepository} blockExecutionContextRepository - * @param {CoreRpcClient} coreRpcClient * @param {BaseLogger} logger * @param {ExecutionTimer} executionTimer * @param {BlockExecutionContext} latestBlockExecutionContext * @param {BlockExecutionContext} proposalBlockExecutionContext * @param {processProposal} processProposal + * @param {broadcastWithdrawalTransactions} broadcastWithdrawalTransactions * @param {createContextLogger} createContextLogger * */ function finalizeBlockHandlerFactory( groveDBStore, blockExecutionContextRepository, - coreRpcClient, logger, executionTimer, latestBlockExecutionContext, proposalBlockExecutionContext, processProposal, + broadcastWithdrawalTransactions, createContextLogger, ) { /** @@ -96,6 +97,18 @@ function finalizeBlockHandlerFactory( await processProposal(processProposalRequest, contextLogger); } + // Send 
withdrawal transactions to Core + const unsignedWithdrawalTransactionsMap = proposalBlockExecutionContext + .getWithdrawalTransactionsMap(); + + const { thresholdVoteExtensions } = commitInfo; + + await broadcastWithdrawalTransactions( + proposalBlockExecutionContext, + thresholdVoteExtensions, + unsignedWithdrawalTransactionsMap, + ); + proposalBlockExecutionContext.setLastCommitInfo(commitInfo); // Store proposal block execution context @@ -112,30 +125,6 @@ function finalizeBlockHandlerFactory( // Update last block execution context with proposal data latestBlockExecutionContext.populate(proposalBlockExecutionContext); - // Send withdrawal transactions to Core - const unsignedWithdrawalTransactionsMap = proposalBlockExecutionContext - .getWithdrawalTransactionsMap(); - - const { thresholdVoteExtensions } = commitInfo; - - for (const { extension, signature } of (thresholdVoteExtensions || [])) { - const withdrawalTransactionHash = extension.toString('hex'); - - const unsignedWithdrawalTransactionBytes = unsignedWithdrawalTransactionsMap[ - withdrawalTransactionHash - ]; - - if (unsignedWithdrawalTransactionBytes) { - const transactionBytes = Buffer.concat([ - unsignedWithdrawalTransactionBytes, - signature, - ]); - - // TODO: think about Core error handling - await coreRpcClient.sendRawTransaction(transactionBytes.toString('hex')); - } - } - proposalBlockExecutionContext.reset(); const blockExecutionTimings = executionTimer.stopTimer('blockExecution'); diff --git a/packages/js-drive/lib/abci/handlers/prepareProposalHandlerFactory.js b/packages/js-drive/lib/abci/handlers/prepareProposalHandlerFactory.js index d1ed6b616ce..9f36462f661 100644 --- a/packages/js-drive/lib/abci/handlers/prepareProposalHandlerFactory.js +++ b/packages/js-drive/lib/abci/handlers/prepareProposalHandlerFactory.js @@ -54,6 +54,7 @@ function prepareProposalHandlerFactory( proposerProTxHash, proposedAppVersion, round, + quorumHash, } = request; const contextLogger = createContextLogger(logger, 
{ @@ -80,6 +81,7 @@ function prepareProposalHandlerFactory( proposerProTxHash: Buffer.from(proposerProTxHash), proposedAppVersion, round, + quorumHash, }, contextLogger, ); @@ -91,7 +93,7 @@ function prepareProposalHandlerFactory( const blockFees = { storageFee: 0, processingFee: 0, - refundsPerEpoch: { }, + refundsPerEpoch: {}, }; let validTxCount = 0; diff --git a/packages/js-drive/lib/abci/handlers/proposal/beginBlockFactory.js b/packages/js-drive/lib/abci/handlers/proposal/beginBlockFactory.js index 2e444927d8c..a159db141e4 100644 --- a/packages/js-drive/lib/abci/handlers/proposal/beginBlockFactory.js +++ b/packages/js-drive/lib/abci/handlers/proposal/beginBlockFactory.js @@ -20,6 +20,7 @@ const protoTimestampToMillis = require('../../../util/protoTimestampToMillis'); * @param {synchronizeMasternodeIdentities} synchronizeMasternodeIdentities * @param {RSAbci} rsAbci * @param {ExecutionTimer} executionTimer + * @param {LastSyncedCoreHeightRepository} lastSyncedCoreHeightRepository * @param {SimplifiedMasternodeList} simplifiedMasternodeList * * @return {beginBlock} @@ -36,6 +37,7 @@ function beginBlockFactory( synchronizeMasternodeIdentities, rsAbci, executionTimer, + lastSyncedCoreHeightRepository, simplifiedMasternodeList, ) { /** @@ -48,6 +50,7 @@ function beginBlockFactory( * @param {Long} request.proposedAppVersion * @param {ITimestamp} request.time * @param {Buffer} request.proposerProTxHash + * @param {Buffer} request.quorumHash * @param {BaseLogger} contextLogger * * @return {Promise} @@ -62,6 +65,7 @@ function beginBlockFactory( proposerProTxHash, proposedAppVersion, round, + quorumHash, } = request; if (proposalBlockExecutionContext.isEmpty()) { @@ -120,6 +124,12 @@ function beginBlockFactory( await groveDBStore.startTransaction(); + const lastSyncedHeightResult = await lastSyncedCoreHeightRepository.fetch({ + useTransaction: true, + }); + + const lastSyncedCoreHeight = lastSyncedHeightResult.getValue() || 0; + // Call RS ABCI /** @@ -129,8 +139,9 @@ 
function beginBlockFactory( blockHeight: height.toNumber(), blockTimeMs: proposalBlockExecutionContext.getTimeMs(), proposerProTxHash, - // TODO replace with real value - validatorSetQuorumHash: Buffer.alloc(32), + validatorSetQuorumHash: quorumHash, + coreChainLockedHeight, + lastSyncedCoreHeight, // TODO: Since we don't have HPMNs now and every masternode can be a validator, // we pass the whole list totalHpmns: simplifiedMasternodeList.getStore() diff --git a/packages/js-drive/lib/abci/handlers/proposal/broadcastWithdrawalTransactionsFactory.js b/packages/js-drive/lib/abci/handlers/proposal/broadcastWithdrawalTransactionsFactory.js new file mode 100644 index 00000000000..3bc4818b684 --- /dev/null +++ b/packages/js-drive/lib/abci/handlers/proposal/broadcastWithdrawalTransactionsFactory.js @@ -0,0 +1,64 @@ +const BlockInfo = require('../../../blockExecution/BlockInfo'); + +/** + * @param {CoreRpcClient} coreRpcClient + * @param {updateWithdrawalTransactionIdAndStatus} updateWithdrawalTransactionIdAndStatus + * + * @return {broadcastWithdrawalTransactions} + */ +function broadcastWithdrawalTransactionsFactory( + coreRpcClient, + updateWithdrawalTransactionIdAndStatus, +) { + /** + * @typedef broadcastWithdrawalTransactions + * + * @param {BlockExecutionContext} proposalBlockExecutionContext + * @param {{{ extension: Buffer, signature: Buffer }}[]} thresholdVoteExtensions + * @param {Object} unsignedWithdrawalTransactionsMap + * + * @return {Promise} + */ + async function broadcastWithdrawalTransactions( + proposalBlockExecutionContext, + thresholdVoteExtensions, + unsignedWithdrawalTransactionsMap, + ) { + const blockInfo = BlockInfo.createFromBlockExecutionContext(proposalBlockExecutionContext); + + const transactionIdMap = {}; + + for (const { extension, signature } of (thresholdVoteExtensions || [])) { + const withdrawalTransactionHash = extension.toString('hex'); + + const unsignedWithdrawalTransactionBytes = unsignedWithdrawalTransactionsMap[ + 
withdrawalTransactionHash + ]; + + if (unsignedWithdrawalTransactionBytes) { + const transactionBytes = Buffer.concat([ + unsignedWithdrawalTransactionBytes, + signature, + ]); + + transactionIdMap[unsignedWithdrawalTransactionBytes.toString('hex')] = transactionBytes; + + // TODO: think about Core error handling + await coreRpcClient.sendRawTransaction(transactionBytes.toString('hex')); + } + } + + await updateWithdrawalTransactionIdAndStatus( + blockInfo, + proposalBlockExecutionContext.getCoreChainLockedHeight(), + transactionIdMap, + { + useTransaction: true, + }, + ); + } + + return broadcastWithdrawalTransactions; +} + +module.exports = broadcastWithdrawalTransactionsFactory; diff --git a/packages/js-drive/lib/abci/handlers/proposal/processProposalFactory.js b/packages/js-drive/lib/abci/handlers/proposal/processProposalFactory.js index a3cb00394fd..85907f69401 100644 --- a/packages/js-drive/lib/abci/handlers/proposal/processProposalFactory.js +++ b/packages/js-drive/lib/abci/handlers/proposal/processProposalFactory.js @@ -44,6 +44,7 @@ function processProposalFactory( proposerProTxHash, proposedAppVersion, round, + quorumHash, } = request; contextLogger.info(`Processing a block proposal for height #${height} round #${round}`); @@ -58,6 +59,7 @@ function processProposalFactory( proposerProTxHash: Buffer.from(proposerProTxHash), proposedAppVersion, round, + quorumHash, }, contextLogger, ); @@ -66,7 +68,7 @@ function processProposalFactory( const blockFees = { storageFee: 0, processingFee: 0, - refundsPerEpoch: { }, + refundsPerEpoch: {}, }; let validTxCount = 0; diff --git a/packages/js-drive/lib/abci/handlers/verifyVoteExtensionHandlerFactory.js b/packages/js-drive/lib/abci/handlers/verifyVoteExtensionHandlerFactory.js index 948e539ce0b..c414ef3e4a7 100644 --- a/packages/js-drive/lib/abci/handlers/verifyVoteExtensionHandlerFactory.js +++ b/packages/js-drive/lib/abci/handlers/verifyVoteExtensionHandlerFactory.js @@ -3,6 +3,9 @@ const { abci: { 
ResponseVerifyVoteExtension, }, + types: { + VoteExtensionType, + }, }, } = require('@dashevo/abci/types'); @@ -19,20 +22,67 @@ const verifyStatus = { function verifyVoteExtensionHandlerFactory(proposalBlockExecutionContext) { /** * @typedef verifyVoteExtensionHandler + * + * @param {abci.RequestVerifyVoteExtension} request + * * @return {Promise} */ - async function verifyVoteExtensionHandler() { + async function verifyVoteExtensionHandler(request) { + const { + voteExtensions, + } = request; + const contextLogger = proposalBlockExecutionContext.getContextLogger() .child({ abciMethod: 'verifyVoteExtension', }); contextLogger.debug('VerifyVote ABCI method requested'); + contextLogger.trace({ request }); + + const unsignedWithdrawalTransactionsMap = proposalBlockExecutionContext + .getWithdrawalTransactionsMap(); + + const voteExtensionsToCheck = Object.keys(unsignedWithdrawalTransactionsMap || {}) + .sort() + .map((txHashHex) => ({ + type: VoteExtensionType.THRESHOLD_RECOVER, + extension: Buffer.from(txHashHex, 'hex'), + })); + + const numberOfVoteExtensionsMatch = ( + voteExtensionsToCheck.length === (voteExtensions || []).length + ); + + const allVoteExtensionsPresent = voteExtensionsToCheck.reduce((result, nextExtension) => { + const searchedVoteExtension = (voteExtensions || []).find((voteExtension) => ( + voteExtension.type === nextExtension.type + && Buffer.compare(voteExtension.extension, nextExtension.extension) + )); + + if (!searchedVoteExtension) { + const extensionString = nextExtension.extension.toString('hex'); + + const extensionTruncatedString = extensionString.substring( + 0, + Math.min(30, extensionString.length), + ); + + contextLogger.warn({ + type: nextExtension.type, + extension: extensionString, + }, `${nextExtension.type} vote extension ${extensionTruncatedString}... 
was not found in verify request`); + } + + return result && (searchedVoteExtension !== undefined); + }, true); - // TODO Verify withdrawal vote extensions and add logs + const status = (numberOfVoteExtensionsMatch && allVoteExtensionsPresent) + ? verifyStatus.ACCEPT + : verifyStatus.REJECT; return new ResponseVerifyVoteExtension({ - status: verifyStatus.ACCEPT, + status, }); } diff --git a/packages/js-drive/lib/createDIContainer.js b/packages/js-drive/lib/createDIContainer.js index 565351f5818..d87029bb822 100644 --- a/packages/js-drive/lib/createDIContainer.js +++ b/packages/js-drive/lib/createDIContainer.js @@ -135,6 +135,9 @@ const handleUpdatedScriptPayoutFactory = require('./identity/masternode/handleUp const getWithdrawPubKeyTypeFromPayoutScriptFactory = require('./identity/masternode/getWithdrawPubKeyTypeFromPayoutScriptFactory'); const getPublicKeyFromPayoutScript = require('./identity/masternode/getPublicKeyFromPayoutScript'); +const updateWithdrawalTransactionIdAndStatusFactory = require('./identity/withdrawals/updateWithdrawalTransactionIdAndStatusFactory'); +const broadcastWithdrawalTransactionsFactory = require('./abci/handlers/proposal/broadcastWithdrawalTransactionsFactory'); + const DocumentRepository = require('./document/DocumentRepository'); const ExecutionTimer = require('./util/ExecutionTimer'); const noopLoggerInstance = require('./util/noopLogger'); @@ -475,9 +478,22 @@ function createDIContainer(options) { groveDBLatestFile, dataContractsGlobalCacheSize, dataContractsBlockCacheSize, + coreJsonRpcHost, + coreJsonRpcPort, + coreJsonRpcUsername, + coreJsonRpcPassword, ) => new RSDrive(groveDBLatestFile, { - dataContractsGlobalCacheSize, - dataContractsBlockCacheSize, + drive: { + dataContractsGlobalCacheSize, + dataContractsBlockCacheSize, + }, + core: { + rpc: { + url: `${coreJsonRpcHost}:${coreJsonRpcPort}`, + username: coreJsonRpcUsername, + password: coreJsonRpcPassword, + }, + }, })) .disposer(async (rsDrive) => { // Flush data on disk @@ 
-726,6 +742,16 @@ function createDIContainer(options) { validatorSet: asClass(ValidatorSet), }); + /** + * Register withdrawals stuff + */ + container.register({ + updateWithdrawalTransactionIdAndStatus: asFunction( + updateWithdrawalTransactionIdAndStatusFactory, + ), + broadcastWithdrawalTransactions: asFunction(broadcastWithdrawalTransactionsFactory), + }); + /** * Register feature flags stuff */ diff --git a/packages/js-drive/lib/dpp/DriveStateRepository.js b/packages/js-drive/lib/dpp/DriveStateRepository.js index 7cd09d42c23..173c9f92efa 100644 --- a/packages/js-drive/lib/dpp/DriveStateRepository.js +++ b/packages/js-drive/lib/dpp/DriveStateRepository.js @@ -615,9 +615,12 @@ class DriveStateRepository { * @returns {Promise} */ async fetchLatestWithdrawalTransactionIndex() { - // TODO: handle dry run via passing state transition execution context + const blockInfo = BlockInfo.createFromBlockExecutionContext(this.blockExecutionContext); + return this.rsDrive.fetchLatestWithdrawalTransactionIndex( + blockInfo, this.#options.useTransaction, + this.#options.dryRun, ); } @@ -630,10 +633,13 @@ class DriveStateRepository { * @returns {Promise} */ async enqueueWithdrawalTransaction(index, transactionBytes) { + const blockInfo = BlockInfo.createFromBlockExecutionContext(this.blockExecutionContext); + // TODO: handle dry run via passing state transition execution context return this.rsDrive.enqueueWithdrawalTransaction( + index, transactionBytes, + blockInfo, this.#options.useTransaction, ); } diff --git a/packages/js-drive/lib/identity/withdrawals/updateWithdrawalTransactionIdAndStatusFactory.js b/packages/js-drive/lib/identity/withdrawals/updateWithdrawalTransactionIdAndStatusFactory.js new file mode 100644 index 00000000000..abd8b2c562e --- /dev/null +++ b/packages/js-drive/lib/identity/withdrawals/updateWithdrawalTransactionIdAndStatusFactory.js @@ -0,0 +1,76 @@ +const WITHDRAWALS_DOCUMENT_TYPE = 'withdrawal'; + +const WITHDRAWALS_STATUS_POOLED = 1; +const
WITHDRAWALS_STATUS_BROADCASTED = 2; + +/** + * @param {DocumentRepository} documentRepository + * @param {fetchDocuments} fetchDocuments + * @param {Identifier} withdrawalsContractId + * + * @returns {updateWithdrawalTransactionIdAndStatus} + */ +function updateWithdrawalTransactionIdAndStatusFactory( + documentRepository, + fetchDocuments, + withdrawalsContractId, +) { + /** + * Update withdrawal transactionId and set status to BROADCASTED + * + * @typedef updateWithdrawalTransactionIdAndStatus + * + * @param {BlockInfo} blockInfo + * @param {number} coreChainLockedHeight + * @param {Object} transactionIdMap + * @param {Object} options + * + * @returns {Promise} + */ + async function updateWithdrawalTransactionIdAndStatus( + blockInfo, + coreChainLockedHeight, + transactionIdMap, + options, + ) { + const originalTransactionIds = Object.keys(transactionIdMap).map((key) => Buffer.from(key, 'hex')); + + if (originalTransactionIds.length === 0) { + return; + } + + const fetchOptions = { + where: [ + ['status', '==', WITHDRAWALS_STATUS_POOLED], + ['transactionId', 'in', originalTransactionIds], + ], + orderBy: [ + ['transactionId', 'asc'], + ], + ...options, + }; + + const documents = await fetchDocuments( + withdrawalsContractId, + WITHDRAWALS_DOCUMENT_TYPE, + fetchOptions, + ); + + for (const document of documents) { + const originalTransactionIdHex = document.get('transactionId').toString('hex'); + + const updatedTransactionId = transactionIdMap[originalTransactionIdHex]; + + document.set('transactionId', updatedTransactionId); + document.set('transactionSignHeight', coreChainLockedHeight); + document.set('status', WITHDRAWALS_STATUS_BROADCASTED); + document.setRevision(document.getRevision() + 1); + + await documentRepository.update(document, blockInfo, options); + } + } + + return updateWithdrawalTransactionIdAndStatus; +} + +module.exports = updateWithdrawalTransactionIdAndStatusFactory; diff --git 
a/packages/js-drive/test/integration/blockExecution/BlockExecutionContextRepository.spec.js b/packages/js-drive/test/integration/blockExecution/BlockExecutionContextRepository.spec.js index 4d453ade22a..0044355bc3c 100644 --- a/packages/js-drive/test/integration/blockExecution/BlockExecutionContextRepository.spec.js +++ b/packages/js-drive/test/integration/blockExecution/BlockExecutionContextRepository.spec.js @@ -26,8 +26,17 @@ describe('BlockExecutionContextRepository', () => { blockExecutionContext.fromObject(plainObject); rsDrive = new Drive('./db/grovedb_test', { - dataContractsGlobalCacheSize: 500, - dataContractsBlockCacheSize: 500, + drive: { + dataContractsGlobalCacheSize: 500, + dataContractsBlockCacheSize: 500, + }, + core: { + rpc: { + url: '127.0.0.1', + username: '', + password: '', + }, + }, }); store = new GroveDBStore(rsDrive, noopLogger); diff --git a/packages/js-drive/test/integration/dataContract/DataContractStoreRepository.spec.js b/packages/js-drive/test/integration/dataContract/DataContractStoreRepository.spec.js index b104584e459..a8be383ef27 100644 --- a/packages/js-drive/test/integration/dataContract/DataContractStoreRepository.spec.js +++ b/packages/js-drive/test/integration/dataContract/DataContractStoreRepository.spec.js @@ -20,8 +20,17 @@ describe('DataContractStoreRepository', () => { beforeEach(async () => { rsDrive = new Drive('./db/grovedb_test', { - dataContractsGlobalCacheSize: 500, - dataContractsBlockCacheSize: 500, + drive: { + dataContractsGlobalCacheSize: 500, + dataContractsBlockCacheSize: 500, + }, + core: { + rpc: { + url: '127.0.0.1', + username: '', + password: '', + }, + }, }); store = new GroveDBStore(rsDrive, noopLogger); diff --git a/packages/js-drive/test/integration/groveDB/GroveDBStore.spec.js b/packages/js-drive/test/integration/groveDB/GroveDBStore.spec.js index d37d5bde52d..162c155e7fd 100644 --- a/packages/js-drive/test/integration/groveDB/GroveDBStore.spec.js +++ 
b/packages/js-drive/test/integration/groveDB/GroveDBStore.spec.js @@ -15,8 +15,17 @@ describe('GroveDBStore', () => { beforeEach(async () => { rsDrive = new Drive('./db/grovedb_test', { - dataContractsGlobalCacheSize: 500, - dataContractsBlockCacheSize: 500, + drive: { + dataContractsGlobalCacheSize: 500, + dataContractsBlockCacheSize: 500, + }, + core: { + rpc: { + url: '127.0.0.1', + username: '', + password: '', + }, + }, }); store = new GroveDBStore(rsDrive, logger); @@ -381,8 +390,17 @@ describe('GroveDBStore', () => { rimraf.sync('./db/grovedb_test'); rsDrive = new Drive('./db/grovedb_test', { - dataContractsGlobalCacheSize: 500, - dataContractsBlockCacheSize: 500, + drive: { + dataContractsGlobalCacheSize: 500, + dataContractsBlockCacheSize: 500, + }, + core: { + rpc: { + url: '127.0.0.1', + username: '', + password: '', + }, + }, }); store = new GroveDBStore(rsDrive, logger); diff --git a/packages/js-drive/test/integration/identity/IdentityBalanceStoreRepository.spec.js b/packages/js-drive/test/integration/identity/IdentityBalanceStoreRepository.spec.js index 22e6ec8eb08..86e79eca563 100644 --- a/packages/js-drive/test/integration/identity/IdentityBalanceStoreRepository.spec.js +++ b/packages/js-drive/test/integration/identity/IdentityBalanceStoreRepository.spec.js @@ -21,8 +21,17 @@ describe('IdentityStoreRepository', () => { beforeEach(async () => { rsDrive = new Drive('./db/grovedb_test', { - dataContractsGlobalCacheSize: 500, - dataContractsBlockCacheSize: 500, + drive: { + dataContractsGlobalCacheSize: 500, + dataContractsBlockCacheSize: 500, + }, + core: { + rpc: { + url: '127.0.0.1', + username: '', + password: '', + }, + }, }); await rsDrive.createInitialStateStructure(); diff --git a/packages/js-drive/test/integration/identity/IdentityPublicKeyStoreRepository.spec.js b/packages/js-drive/test/integration/identity/IdentityPublicKeyStoreRepository.spec.js index d673041f153..b51a68f8c25 100644 --- 
a/packages/js-drive/test/integration/identity/IdentityPublicKeyStoreRepository.spec.js +++ b/packages/js-drive/test/integration/identity/IdentityPublicKeyStoreRepository.spec.js @@ -19,14 +19,20 @@ describe('IdentityPublicKeyStoreRepository', () => { beforeEach(async () => { rsDrive = new Drive('./db/grovedb_test', { - dataContractsGlobalCacheSize: 500, - dataContractsBlockCacheSize: 500, + drive: { + dataContractsGlobalCacheSize: 500, + dataContractsBlockCacheSize: 500, + }, + core: { + rpc: { + url: '127.0.0.1', + username: '', + password: '', + }, + }, }); - store = new GroveDBStore(rsDrive, logger, { - dataContractsGlobalCacheSize: 500, - dataContractsBlockCacheSize: 500, - }); + store = new GroveDBStore(rsDrive, logger); await rsDrive.createInitialStateStructure(); diff --git a/packages/js-drive/test/integration/identity/IdentityStoreRepository.spec.js b/packages/js-drive/test/integration/identity/IdentityStoreRepository.spec.js index f805566676a..dce6fe8f74b 100644 --- a/packages/js-drive/test/integration/identity/IdentityStoreRepository.spec.js +++ b/packages/js-drive/test/integration/identity/IdentityStoreRepository.spec.js @@ -22,8 +22,17 @@ describe('IdentityStoreRepository', () => { beforeEach(async () => { rsDrive = new Drive('./db/grovedb_test', { - dataContractsGlobalCacheSize: 500, - dataContractsBlockCacheSize: 500, + drive: { + dataContractsGlobalCacheSize: 500, + dataContractsBlockCacheSize: 500, + }, + core: { + rpc: { + url: '127.0.0.1', + username: '', + password: '', + }, + }, }); await rsDrive.createInitialStateStructure(); diff --git a/packages/js-drive/test/integration/identity/SpentAssetLockTransactionsRepository.spec.js b/packages/js-drive/test/integration/identity/SpentAssetLockTransactionsRepository.spec.js index b34d899a43c..b21ab1687fc 100644 --- a/packages/js-drive/test/integration/identity/SpentAssetLockTransactionsRepository.spec.js +++ b/packages/js-drive/test/integration/identity/SpentAssetLockTransactionsRepository.spec.js @@ 
-16,8 +16,17 @@ describe('SpentAssetLockTransactionsRepository', () => { outPointBuffer = Buffer.from([42]); rsDrive = new Drive('./db/grovedb_test', { - dataContractsGlobalCacheSize: 500, - dataContractsBlockCacheSize: 500, + drive: { + dataContractsGlobalCacheSize: 500, + dataContractsBlockCacheSize: 500, + }, + core: { + rpc: { + url: '127.0.0.1', + username: '', + password: '', + }, + }, }); store = new GroveDBStore(rsDrive, logger); diff --git a/packages/js-drive/test/integration/identity/masternode/LastSyncedCoreHeightRepository.spec.js b/packages/js-drive/test/integration/identity/masternode/LastSyncedCoreHeightRepository.spec.js index 9578f29bf5f..d9c75f95503 100644 --- a/packages/js-drive/test/integration/identity/masternode/LastSyncedCoreHeightRepository.spec.js +++ b/packages/js-drive/test/integration/identity/masternode/LastSyncedCoreHeightRepository.spec.js @@ -13,8 +13,17 @@ describe('LastSyncedSmlHeightRepository', () => { beforeEach(async () => { rsDrive = new Drive('./db/grovedb_test', { - dataContractsGlobalCacheSize: 500, - dataContractsBlockCacheSize: 500, + drive: { + dataContractsGlobalCacheSize: 500, + dataContractsBlockCacheSize: 500, + }, + core: { + rpc: { + url: '127.0.0.1', + username: '', + password: '', + }, + }, }); store = new GroveDBStore(rsDrive, logger); diff --git a/packages/js-drive/test/integration/identity/masternode/synchronizeMasternodeIdentitiesFactory.spec.js b/packages/js-drive/test/integration/identity/masternode/synchronizeMasternodeIdentitiesFactory.spec.js index 2259828f358..34c02f709e4 100644 --- a/packages/js-drive/test/integration/identity/masternode/synchronizeMasternodeIdentitiesFactory.spec.js +++ b/packages/js-drive/test/integration/identity/masternode/synchronizeMasternodeIdentitiesFactory.spec.js @@ -364,7 +364,7 @@ describe('synchronizeMasternodeIdentitiesFactory', function main() { beforeEach(async function beforeEach() { coreHeight = 3; - firstSyncAppHash = 
'56ad230b2fb8abb8fcf9b2b3333debf19bf936798052c27ce341d3f418d598bc'; + firstSyncAppHash = 'c55de453e3ea4481f20225efdc12d671f715f0618cf3084bb32e56e75123bfdd'; blockInfo = new BlockInfo(10, 0, 1668702100799); container = await createTestDIContainer(); @@ -684,7 +684,7 @@ describe('synchronizeMasternodeIdentitiesFactory', function main() { // Nothing happened - await expectDeterministicAppHash('0aefb0b73b5db18320ccb6710b3bbc773ec7efa52def3c7efa886797ff386489'); + await expectDeterministicAppHash('a789fe73ceea6769634b98ae82dddad5013c4711b2a8353d2150b813fb953cb3'); // Core RPC should be called @@ -720,7 +720,7 @@ describe('synchronizeMasternodeIdentitiesFactory', function main() { expect(result2.updatedEntities).to.have.lengthOf(0); expect(result2.removedEntities).to.have.lengthOf(0); - await expectDeterministicAppHash('c8e5184cf7799babbf54364a3b4dec9da8787fb916fb6fbfe8e208a89eb00d46'); + await expectDeterministicAppHash('b58bc0499156caea9b930ab0e357f56b20bb191bc7ba97a2eb8941d9ba7a8183'); // New masternode identity should be created @@ -804,7 +804,7 @@ describe('synchronizeMasternodeIdentitiesFactory', function main() { expect(result.updatedEntities).to.have.lengthOf(0); expect(result.removedEntities).to.have.lengthOf(1); - await expectDeterministicAppHash('54ae9ccd7e3e0792d48197f3593ecd3fd2b9ebd807e44b65aefb1204ead35f73'); + await expectDeterministicAppHash('ea28d7339efd80984e53bd06b0d3708611f24862f401997e9cd69328af6a54c2'); // Masternode identity should stay @@ -872,7 +872,7 @@ describe('synchronizeMasternodeIdentitiesFactory', function main() { expect(result.updatedEntities).to.have.lengthOf(0); expect(result.removedEntities).to.have.lengthOf(1); - await expectDeterministicAppHash('54ae9ccd7e3e0792d48197f3593ecd3fd2b9ebd807e44b65aefb1204ead35f73'); + await expectDeterministicAppHash('ea28d7339efd80984e53bd06b0d3708611f24862f401997e9cd69328af6a54c2'); const invalidMasternodeIdentifier = Identifier.from( Buffer.from(invalidSmlEntry.proRegTxHash, 'hex'), @@ -922,7 +922,7 
@@ describe('synchronizeMasternodeIdentitiesFactory', function main() { expect(result.updatedEntities).to.have.lengthOf(1); expect(result.removedEntities).to.have.lengthOf(1); - await expectDeterministicAppHash('13a83c7ece07bc0b91e0552b0a854acaac5a57eb484368050906399663454e72'); + await expectDeterministicAppHash('d80a3bbf5e699a0295e4ba734e3729d12bef3001bdd9cdd295673832d05454cf'); // Masternode identity should stay @@ -997,7 +997,7 @@ describe('synchronizeMasternodeIdentitiesFactory', function main() { await synchronizeMasternodeIdentities(coreHeight + 1, blockInfo); - await expectDeterministicAppHash('fef91740ed0a059c9799b4043c6163e8599ff65fbd191f70d3e9bf0c0e46fec0'); + await expectDeterministicAppHash('cfc8c439d00c2afab01594dc699ef71c8b23cff66b0302794d3c6b88c44d687c'); // Masternode identity should contain new public key @@ -1079,7 +1079,7 @@ describe('synchronizeMasternodeIdentitiesFactory', function main() { // Initial sync await synchronizeMasternodeIdentities(coreHeight, blockInfo); - await expectDeterministicAppHash('b512df69cad926cfae421cd69bbc047d9c0db0800488eed5ca85859ea55513c9'); + await expectDeterministicAppHash('7a5729e3511c5cc98e8452faa6132f0d600e04fef85df0f95f56e59c776de170'); const votingIdentifier = createVotingIdentifier(smlFixture[0]); const votingIdentityResult = await identityRepository.fetch( diff --git a/packages/js-drive/test/unit/abci/handlers/finalizeBlockHandlerFactory.spec.js b/packages/js-drive/test/unit/abci/handlers/finalizeBlockHandlerFactory.spec.js index 062329f12bc..fe254bd9555 100644 --- a/packages/js-drive/test/unit/abci/handlers/finalizeBlockHandlerFactory.spec.js +++ b/packages/js-drive/test/unit/abci/handlers/finalizeBlockHandlerFactory.spec.js @@ -25,13 +25,13 @@ describe('finalizeBlockHandlerFactory', () => { let requestMock; let appHash; let groveDBStoreMock; - let coreRpcClientMock; let blockExecutionContextRepositoryMock; let dataContract; let proposalBlockExecutionContextMock; let round; let block; let 
processProposalMock; + let broadcastWithdrawalTransactions; let createContextLoggerMock; beforeEach(function beforeEach() { @@ -85,14 +85,14 @@ describe('finalizeBlockHandlerFactory', () => { proposalBlockExecutionContextMock.getHeight.returns(new Long(42)); proposalBlockExecutionContextMock.getRound.returns(round); proposalBlockExecutionContextMock.getDataContracts.returns([dataContract]); + proposalBlockExecutionContextMock.getEpochInfo.returns({ + currentEpochIndex: 1, + }); + proposalBlockExecutionContextMock.getTimeMs.returns((new Date()).getTime()); groveDBStoreMock = new GroveDBStoreMock(this.sinon); groveDBStoreMock.getRootHash.resolves(appHash); - coreRpcClientMock = { - sendRawTransaction: this.sinon.stub(), - }; - blockExecutionContextRepositoryMock = new BlockExecutionContextRepositoryMock( this.sinon, ); @@ -100,15 +100,17 @@ describe('finalizeBlockHandlerFactory', () => { processProposalMock = this.sinon.stub(); createContextLoggerMock = this.sinon.stub().returns(loggerMock); + broadcastWithdrawalTransactions = this.sinon.stub(); + finalizeBlockHandler = finalizeBlockHandlerFactory( groveDBStoreMock, blockExecutionContextRepositoryMock, - coreRpcClientMock, loggerMock, executionTimerMock, latestBlockExecutionContextMock, proposalBlockExecutionContextMock, processProposalMock, + broadcastWithdrawalTransactions, createContextLoggerMock, ); }); @@ -133,6 +135,13 @@ describe('finalizeBlockHandlerFactory', () => { expect(latestBlockExecutionContextMock.populate).to.be.calledOnce(); expect(processProposalMock).to.be.not.called(); + + expect(broadcastWithdrawalTransactions).to.have.been.calledOnceWith( + proposalBlockExecutionContextMock, + undefined, + undefined, + ); + expect(createContextLoggerMock).to.be.calledOnceWithExactly( loggerMock, { height: '42', @@ -168,7 +177,6 @@ describe('finalizeBlockHandlerFactory', () => { await finalizeBlockHandler(requestMock); - expect(coreRpcClientMock.sendRawTransaction).to.have.been.calledTwice(); 
expect(processProposalMock).to.be.not.called(); expect(createContextLoggerMock).to.be.calledOnceWithExactly( loggerMock, { diff --git a/packages/js-drive/test/unit/abci/handlers/prepareProposalHandlerFactory.spec.js b/packages/js-drive/test/unit/abci/handlers/prepareProposalHandlerFactory.spec.js index 1459e7f20be..61b9a4bf56a 100644 --- a/packages/js-drive/test/unit/abci/handlers/prepareProposalHandlerFactory.spec.js +++ b/packages/js-drive/test/unit/abci/handlers/prepareProposalHandlerFactory.spec.js @@ -34,6 +34,7 @@ describe('prepareProposalHandlerFactory', () => { let round; let executionTimerMock; let createContextLoggerMock; + let quorumHash; beforeEach(function beforeEach() { round = 1; @@ -127,6 +128,8 @@ describe('prepareProposalHandlerFactory', () => { const localLastCommit = {}; + quorumHash = Buffer.alloc(32, 0); + request = { height, maxTxBytes, @@ -138,6 +141,7 @@ describe('prepareProposalHandlerFactory', () => { proposerProTxHash, proposedAppVersion, round, + quorumHash, }; }); @@ -169,6 +173,7 @@ describe('prepareProposalHandlerFactory', () => { proposerProTxHash: Buffer.from(request.proposerProTxHash), proposedAppVersion: request.proposedAppVersion, round, + quorumHash, }, loggerMock, ); diff --git a/packages/js-drive/test/unit/abci/handlers/proposal/beginBlockFactory.spec.js b/packages/js-drive/test/unit/abci/handlers/proposal/beginBlockFactory.spec.js index fd13ab77cc2..c91301cc649 100644 --- a/packages/js-drive/test/unit/abci/handlers/proposal/beginBlockFactory.spec.js +++ b/packages/js-drive/test/unit/abci/handlers/proposal/beginBlockFactory.spec.js @@ -47,6 +47,7 @@ describe('beginBlockFactory', () => { let timeMs; let epochInfo; let time; + let lastSyncedCoreHeightRepositoryMock; let simplifyMasternodeListMock; let validMasternodesListLength; @@ -110,6 +111,12 @@ describe('beginBlockFactory', () => { blockBegin: this.sinon.stub().resolves(rsResponseMock), }; + lastSyncedCoreHeightRepositoryMock = { + fetch: this.sinon.stub().resolves({ + 
getValue: () => undefined, + }), + }; + validMasternodesListLength = 400; simplifyMasternodeListMock = { @@ -138,6 +145,7 @@ describe('beginBlockFactory', () => { synchronizeMasternodeIdentitiesMock, rsAbciMock, executionTimerMock, + lastSyncedCoreHeightRepositoryMock, simplifyMasternodeListMock, ); diff --git a/packages/js-drive/test/unit/abci/handlers/proposal/broadcastWithdrawalTransactionsFactory.spec.js b/packages/js-drive/test/unit/abci/handlers/proposal/broadcastWithdrawalTransactionsFactory.spec.js new file mode 100644 index 00000000000..11e775a082c --- /dev/null +++ b/packages/js-drive/test/unit/abci/handlers/proposal/broadcastWithdrawalTransactionsFactory.spec.js @@ -0,0 +1,69 @@ +const Long = require('long'); + +const BlockExecutionContextMock = require('../../../../../lib/test/mock/BlockExecutionContextMock'); + +const broadcastWithdrawalTransactionsFactory = require('../../../../../lib/abci/handlers/proposal/broadcastWithdrawalTransactionsFactory'); +const BlockInfo = require('../../../../../lib/blockExecution/BlockInfo'); + +describe('broadcastWithdrawalTransactionsFactory', () => { + let broadcastWithdrawalTransactions; + let proposalBlockExecutionContextMock; + let coreRpcMock; + let updateWithdrawalTransactionIdAndStatusMock; + + beforeEach(function beforeEach() { + proposalBlockExecutionContextMock = new BlockExecutionContextMock(this.sinon); + + proposalBlockExecutionContextMock.getEpochInfo.returns({ + currentEpochIndex: 1, + }); + proposalBlockExecutionContextMock.getHeight.returns(new Long(1)); + proposalBlockExecutionContextMock.getTimeMs.returns(1); + proposalBlockExecutionContextMock.getCoreChainLockedHeight.returns(42); + + coreRpcMock = { + sendRawTransaction: this.sinon.stub(), + }; + + updateWithdrawalTransactionIdAndStatusMock = this.sinon.stub(); + + broadcastWithdrawalTransactions = broadcastWithdrawalTransactionsFactory( + coreRpcMock, + updateWithdrawalTransactionIdAndStatusMock, + ); + }); + + it('should call Core RPC and call 
document update function', async () => { + const extension = Buffer.alloc(32, 2); + const signature = Buffer.alloc(32, 3); + + const txBytes = Buffer.alloc(32, 1); + + const thresholdVoteExtensions = [ + { extension, signature }, + ]; + const unsignedWithdrawalTransactionsMap = { + [extension.toString('hex')]: txBytes, + }; + + await broadcastWithdrawalTransactions( + proposalBlockExecutionContextMock, + thresholdVoteExtensions, + unsignedWithdrawalTransactionsMap, + ); + + const expectedMap = { [txBytes.toString('hex')]: Buffer.concat([txBytes, signature]) }; + + expect(coreRpcMock.sendRawTransaction).to.have.been.calledOnceWithExactly( + Buffer.concat([txBytes, signature]).toString('hex'), + ); + expect(updateWithdrawalTransactionIdAndStatusMock).to.have.been.calledOnceWithExactly( + BlockInfo.createFromBlockExecutionContext(proposalBlockExecutionContextMock), + 42, + expectedMap, + { + useTransaction: true, + }, + ); + }); +}); diff --git a/packages/js-drive/test/unit/abci/handlers/proposal/processProposalFactory.spec.js b/packages/js-drive/test/unit/abci/handlers/proposal/processProposalFactory.spec.js index 08195f75f03..c114a5e676a 100644 --- a/packages/js-drive/test/unit/abci/handlers/proposal/processProposalFactory.spec.js +++ b/packages/js-drive/test/unit/abci/handlers/proposal/processProposalFactory.spec.js @@ -30,6 +30,7 @@ describe('processProposalFactory', () => { let proposalBlockExecutionContextMock; let round; let executionTimerMock; + let quorumHash; beforeEach(function beforeEach() { round = 0; @@ -109,6 +110,8 @@ describe('processProposalFactory', () => { signature: '1897ce8f54d2070f44ca5c29983b68b391e8137c25e44f67416e579f3e3bdfef7b4fd22db7818399147e52907998857b0fbc8edfdc40a64f2c7df0e88544d31d12ca8c15e73d50dda25ca23f754ed3f789ed4bcb392161995f464017c10df404', }; + quorumHash = Buffer.alloc(32, 0); + request = { round, height, @@ -119,6 +122,7 @@ describe('processProposalFactory', () => { time, proposerProTxHash, coreChainLockUpdate, + quorumHash, 
}; }); @@ -142,6 +146,7 @@ describe('processProposalFactory', () => { proposerProTxHash: Buffer.from(request.proposerProTxHash), proposedAppVersion: request.proposedAppVersion, round, + quorumHash, }, loggerMock, ); diff --git a/packages/js-drive/test/unit/abci/handlers/verifyVoteExtensionHandlerFactory.spec.js b/packages/js-drive/test/unit/abci/handlers/verifyVoteExtensionHandlerFactory.spec.js index 722f1443d1d..ab558ad9639 100644 --- a/packages/js-drive/test/unit/abci/handlers/verifyVoteExtensionHandlerFactory.spec.js +++ b/packages/js-drive/test/unit/abci/handlers/verifyVoteExtensionHandlerFactory.spec.js @@ -3,6 +3,9 @@ const { abci: { ResponseVerifyVoteExtension, }, + types: { + VoteExtensionType, + }, }, } = require('@dashevo/abci/types'); const verifyVoteExtensionHandlerFactory = require('../../../../lib/abci/handlers/verifyVoteExtensionHandlerFactory'); @@ -12,6 +15,7 @@ const LoggerMock = require('../../../../lib/test/mock/LoggerMock'); describe('verifyVoteExtensionHandlerFactory', () => { let verifyVoteExtensionHandler; let proposalBlockExecutionContextMock; + let unsignedWithdrawalTransactionsMapMock; beforeEach(function beforeEach() { proposalBlockExecutionContextMock = new BlockExecutionContextMock(this.sinon); @@ -19,13 +23,74 @@ describe('verifyVoteExtensionHandlerFactory', () => { const loggerMock = new LoggerMock(this.sinon); proposalBlockExecutionContextMock.getContextLogger.returns(loggerMock); + unsignedWithdrawalTransactionsMapMock = {}; + proposalBlockExecutionContextMock.getWithdrawalTransactionsMap.returns( + unsignedWithdrawalTransactionsMapMock, + ); + verifyVoteExtensionHandler = verifyVoteExtensionHandlerFactory( proposalBlockExecutionContextMock, ); }); - it('should return ResponseVerifyVoteExtension', async () => { - const result = await verifyVoteExtensionHandler(); + it('should return ResponseVerifyVoteExtension with REJECT status if vote extensions length not match', async () => { + const voteExtensions = [ + { type: 
VoteExtensionType.THRESHOLD_RECOVER, extension: Buffer.alloc(32, 1) }, + { type: VoteExtensionType.THRESHOLD_RECOVER, extension: Buffer.alloc(32, 2) }, + { type: VoteExtensionType.THRESHOLD_RECOVER, extension: Buffer.alloc(32, 3) }, + ]; + + const unsignedWithdrawalTransactionsMap = { + [Buffer.alloc(32, 1).toString('hex')]: undefined, + [Buffer.alloc(32, 2).toString('hex')]: undefined, + }; + + proposalBlockExecutionContextMock.getWithdrawalTransactionsMap.returns( + unsignedWithdrawalTransactionsMap, + ); + + const result = await verifyVoteExtensionHandler({ voteExtensions }); + + expect(result).to.be.an.instanceOf(ResponseVerifyVoteExtension); + expect(result.status).to.equal(2); + }); + + it('should return ResponseVerifyVoteExtension with REJECT status if vote extension is missing', async () => { + const voteExtensions = [ + { type: VoteExtensionType.THRESHOLD_RECOVER, extension: Buffer.alloc(32, 1) }, + ]; + + const unsignedWithdrawalTransactionsMap = { + [Buffer.alloc(32, 1).toString('hex')]: undefined, + [Buffer.alloc(32, 2).toString('hex')]: undefined, + }; + + proposalBlockExecutionContextMock.getWithdrawalTransactionsMap.returns( + unsignedWithdrawalTransactionsMap, + ); + + const result = await verifyVoteExtensionHandler({ voteExtensions }); + + expect(result).to.be.an.instanceOf(ResponseVerifyVoteExtension); + expect(result.status).to.equal(2); + }); + + it('should return ACCEPT if everything is fine', async () => { + const voteExtensions = [ + { type: VoteExtensionType.THRESHOLD_RECOVER, extension: Buffer.alloc(32, 1) }, + { type: VoteExtensionType.THRESHOLD_RECOVER, extension: Buffer.alloc(32, 2) }, + ]; + + const unsignedWithdrawalTransactionsMap = { + [Buffer.alloc(32, 1).toString('hex')]: undefined, + [Buffer.alloc(32, 2).toString('hex')]: undefined, + }; + + proposalBlockExecutionContextMock.getWithdrawalTransactionsMap.returns( + unsignedWithdrawalTransactionsMap, + ); + + const result = await verifyVoteExtensionHandler({ voteExtensions }); 
expect(result).to.be.an.instanceOf(ResponseVerifyVoteExtension); expect(result.status).to.equal(1); diff --git a/packages/js-drive/test/unit/dpp/DriveStateRepository.spec.js b/packages/js-drive/test/unit/dpp/DriveStateRepository.spec.js index e0776c8bfc9..a974f8522a7 100644 --- a/packages/js-drive/test/unit/dpp/DriveStateRepository.spec.js +++ b/packages/js-drive/test/unit/dpp/DriveStateRepository.spec.js @@ -703,7 +703,9 @@ describe('DriveStateRepository', () => { expect( rsDriveMock.fetchLatestWithdrawalTransactionIndex, ).to.have.been.calledOnceWithExactly( + blockInfo, repositoryOptions.useTransaction, + repositoryOptions.dryRun, ); }); }); @@ -722,6 +724,7 @@ describe('DriveStateRepository', () => { ).to.have.been.calledOnceWithExactly( index, transactionBytes, + blockInfo, repositoryOptions.useTransaction, ); }); diff --git a/packages/js-drive/test/unit/identity/withdrawals/updateWithdrawalTransactionIdAndStatusFactory.spec.js b/packages/js-drive/test/unit/identity/withdrawals/updateWithdrawalTransactionIdAndStatusFactory.spec.js new file mode 100644 index 00000000000..1985b1b1623 --- /dev/null +++ b/packages/js-drive/test/unit/identity/withdrawals/updateWithdrawalTransactionIdAndStatusFactory.spec.js @@ -0,0 +1,89 @@ +const getDocumentsFixture = require('@dashevo/dpp/lib/test/fixtures/getDocumentsFixture'); +const Identifier = require('@dashevo/dpp/lib/identifier/Identifier'); + +const updateWithdrawalTransactionIdAndStatusFactory = require('../../../../lib/identity/withdrawals/updateWithdrawalTransactionIdAndStatusFactory'); +const BlockInfo = require('../../../../lib/blockExecution/BlockInfo'); + +describe('updateWithdrawalTransactionIdAndStatusFactory', () => { + let updateWithdrawalTransactionIdAndStatus; + let withdrawalsContractId; + let documentRepositoryMock; + let fetchDocumentsMock; + let document1Fixture; + let document2Fixture; + + beforeEach(function beforeEach() { + ([document1Fixture, document2Fixture] = getDocumentsFixture()); + + 
document1Fixture.set('transactionId', Buffer.alloc(32, 1)); + document2Fixture.set('transactionId', Buffer.alloc(32, 3)); + + withdrawalsContractId = Identifier.from(Buffer.alloc(32)); + + documentRepositoryMock = { + update: this.sinon.stub(), + }; + + fetchDocumentsMock = this.sinon.stub(); + fetchDocumentsMock.resolves([document1Fixture, document2Fixture]); + + updateWithdrawalTransactionIdAndStatus = updateWithdrawalTransactionIdAndStatusFactory( + documentRepositoryMock, + fetchDocumentsMock, + withdrawalsContractId, + ); + }); + + it('should update documents transactionId, status and revision', async () => { + const blockInfo = new BlockInfo(1, 1, 1); + + const coreChainLockedHeight = 42; + + const transactionIdMap = { + [Buffer.alloc(32, 1).toString('hex')]: Buffer.alloc(32, 2), + [Buffer.alloc(32, 3).toString('hex')]: Buffer.alloc(32, 4), + }; + + await updateWithdrawalTransactionIdAndStatus( + blockInfo, + coreChainLockedHeight, + transactionIdMap, + { + useTransaction: true, + }, + ); + + expect(fetchDocumentsMock).to.have.been.calledOnceWithExactly( + withdrawalsContractId, + 'withdrawal', + { + where: [ + ['status', '==', 1], + ['transactionId', 'in', [Buffer.alloc(32, 1), Buffer.alloc(32, 3)]], + ], + orderBy: [ + ['transactionId', 'asc'], + ], + useTransaction: true, + }, + ); + + expect(documentRepositoryMock.update).to.have.been.calledTwice(); + expect(documentRepositoryMock.update.getCall(0).args).to.deep.equal( + [document1Fixture, blockInfo, { useTransaction: true }], + ); + expect(documentRepositoryMock.update.getCall(1).args).to.deep.equal( + [document2Fixture, blockInfo, { useTransaction: true }], + ); + + expect(document1Fixture.get('transactionSignHeight')).to.deep.equal(coreChainLockedHeight); + expect(document1Fixture.get('transactionId')).to.deep.equal(Buffer.alloc(32, 2)); + expect(document1Fixture.get('status')).to.deep.equal(2); + expect(document1Fixture.getRevision()).to.deep.equal(2); + + 
expect(document2Fixture.get('transactionSignHeight')).to.deep.equal(coreChainLockedHeight); + expect(document2Fixture.get('transactionId')).to.deep.equal(Buffer.alloc(32, 4)); + expect(document2Fixture.get('status')).to.deep.equal(2); + expect(document2Fixture.getRevision()).to.deep.equal(2); + }); +}); diff --git a/packages/rs-dpp/Cargo.toml b/packages/rs-dpp/Cargo.toml index 81d8d5e015e..bb563469cb9 100644 --- a/packages/rs-dpp/Cargo.toml +++ b/packages/rs-dpp/Cargo.toml @@ -13,7 +13,7 @@ bs58 = "0.4.0" byteorder = { version="1.4"} chrono = { version="0.4.20", default-features=false, features=["wasmbind", "clock"]} ciborium = { git="https://github.com/qrayven/ciborium", branch="feat-ser-null-as-undefined"} -dashcore = { git="https://github.com/dashevo/rust-dashcore", features=["no-std", "secp-recovery", "rand", "signer", "use-serde"], default-features = false, branch="master" } +dashcore = { git="https://github.com/dashevo/rust-dashcore", features=["std", "secp-recovery", "rand", "signer", "use-serde"], default-features = false, rev = "51548a4a1b9eca7430f5f3caf94d9784886ff2e9" } env_logger = { version="0.9"} futures = { version ="0.3"} getrandom= { version="0.2", features=["js"]} @@ -40,12 +40,10 @@ mockall = { version="0.11.3", optional=true} data-contracts = { path = "../data-contracts" } [dev-dependencies] -mockall= { version ="0.11"} test-case = { version ="2.0"} tokio = { version ="1.17", features=["full"]} pretty_assertions = { version="1.3.0"} - [features] -mocks = ["mockall"] - +default = ["fixtures-and-mocks"] +fixtures-and-mocks = ["mockall"] diff --git a/packages/rs-dpp/src/contracts/mod.rs b/packages/rs-dpp/src/contracts/mod.rs index b8de2e21572..b426ef7239c 100644 --- a/packages/rs-dpp/src/contracts/mod.rs +++ b/packages/rs-dpp/src/contracts/mod.rs @@ -5,6 +5,7 @@ pub mod dashpay_contract; pub mod dpns_contract; pub mod feature_flags_contract; pub mod masternode_reward_shares_contract; +pub mod withdrawals_contract; /// Contains the system IDs. //? 
The structure contains the heap-allocated String, which is not memory efficient. diff --git a/packages/rs-dpp/src/contracts/withdrawals_contract.rs b/packages/rs-dpp/src/contracts/withdrawals_contract.rs new file mode 100644 index 00000000000..c397debfa9b --- /dev/null +++ b/packages/rs-dpp/src/contracts/withdrawals_contract.rs @@ -0,0 +1,54 @@ +use lazy_static::lazy_static; +use num_enum::{IntoPrimitive, TryFromPrimitive}; +use serde_repr::{Deserialize_repr, Serialize_repr}; + +use crate::prelude::Identifier; + +pub mod document_types { + pub const WITHDRAWAL: &str = "withdrawal"; +} + +pub mod property_names { + pub const TRANSACTION_ID: &str = "transactionId"; + pub const TRANSACTION_SIGN_HEIGHT: &str = "transactionSignHeight"; + pub const TRANSACTION_INDEX: &str = "transactionIndex"; + pub const AMOUNT: &str = "amount"; + pub const CORE_FEE_PER_BYTE: &str = "coreFeePerByte"; + pub const POOLING: &str = "pooling"; + pub const OUTPUT_SCRIPT: &str = "outputScript"; + pub const STATUS: &str = "status"; + pub const CREATE_AT: &str = "$createdAt"; + pub const UPDATED_AT: &str = "$updatedAt"; + pub const OWNER_ID: &str = "$ownerId"; +} + +#[repr(u8)] +#[derive( + Serialize_repr, + Deserialize_repr, + PartialEq, + Eq, + Clone, + Copy, + Debug, + TryFromPrimitive, + IntoPrimitive, +)] +pub enum WithdrawalStatus { + QUEUED = 0, + POOLED = 1, + BROADCASTED = 2, + COMPLETE = 3, + EXPIRED = 4, +} + +lazy_static! 
{ + pub static ref CONTRACT_ID: Identifier = Identifier::new([ + 54, 98, 187, 97, 225, 127, 174, 62, 162, 148, 207, 96, 49, 151, 251, 10, 171, 109, 81, 24, + 11, 216, 182, 16, 76, 73, 68, 166, 47, 226, 217, 127 + ]); + pub static ref OWNER_ID: Identifier = Identifier::new([ + 170, 138, 235, 213, 173, 122, 202, 36, 243, 48, 61, 185, 146, 50, 146, 255, 194, 133, 221, + 176, 188, 82, 144, 69, 234, 198, 106, 35, 245, 167, 46, 192 + ]); +} diff --git a/packages/rs-dpp/src/data_contract/document_type/document_field.rs b/packages/rs-dpp/src/data_contract/document_type/document_field.rs index 839053287c3..c1768d0c650 100644 --- a/packages/rs-dpp/src/data_contract/document_type/document_field.rs +++ b/packages/rs-dpp/src/data_contract/document_type/document_field.rs @@ -263,7 +263,7 @@ impl DocumentFieldType { fn read_varint_value(buf: &mut BufReader<&[u8]>) -> Result>, ProtocolError> { let bytes: usize = buf.read_varint().map_err(|_| { ProtocolError::DataContractError(DataContractError::CorruptedSerialization( - "error reading from serialized document", + "error reading varint length from serialized document", )) })?; if bytes == 0 { @@ -272,7 +272,7 @@ impl DocumentFieldType { let mut value: Vec = vec![0u8; bytes]; buf.read_exact(&mut value).map_err(|_| { ProtocolError::DataContractError(DataContractError::CorruptedSerialization( - "error reading from serialized document", + "error reading varint from serialized document", )) })?; Ok(Some(value)) diff --git a/packages/rs-dpp/src/data_contract/document_type/random_document.rs b/packages/rs-dpp/src/data_contract/document_type/random_document.rs index 4657f758175..6aac1bcb1b0 100644 --- a/packages/rs-dpp/src/data_contract/document_type/random_document.rs +++ b/packages/rs-dpp/src/data_contract/document_type/random_document.rs @@ -109,6 +109,7 @@ impl CreateRandomDocument for DocumentType { id, properties, owner_id, + revision: 1, } } @@ -156,6 +157,7 @@ impl CreateRandomDocument for DocumentType { id, properties, owner_id, + 
revision: 1, } } } diff --git a/packages/rs-dpp/src/data_trigger/get_data_triggers_factory.rs b/packages/rs-dpp/src/data_trigger/get_data_triggers_factory.rs index 198c21a157d..fad6f09b4a3 100644 --- a/packages/rs-dpp/src/data_trigger/get_data_triggers_factory.rs +++ b/packages/rs-dpp/src/data_trigger/get_data_triggers_factory.rs @@ -1,8 +1,11 @@ use std::vec; +use lazy_static::__Deref; + use crate::{ contracts::{ dashpay_contract, dpns_contract, feature_flags_contract, masternode_reward_shares_contract, + withdrawals_contract, }, document::document_transition::Action, errors::ProtocolError, @@ -53,6 +56,8 @@ fn data_triggers() -> Result, ProtocolError> { &masternode_reward_shares_contract::system_ids().contract_id, Encoding::Base58, )?; + let withdrawals_owner_id = withdrawals_contract::OWNER_ID.deref(); + let withdrawals_contract_id = withdrawals_contract::CONTRACT_ID.deref(); let data_triggers = vec![ DataTrigger { @@ -146,6 +151,27 @@ fn data_triggers() -> Result, ProtocolError> { data_trigger_kind: DataTriggerKind::CreateDataContractRequest, top_level_identity: None, }, + DataTrigger { + data_contract_id: *withdrawals_contract_id, + document_type: withdrawals_contract::document_types::WITHDRAWAL.to_string(), + transition_action: Action::Create, + data_trigger_kind: DataTriggerKind::DataTriggerReject, + top_level_identity: None, + }, + DataTrigger { + data_contract_id: *withdrawals_contract_id, + document_type: withdrawals_contract::document_types::WITHDRAWAL.to_string(), + transition_action: Action::Replace, + data_trigger_kind: DataTriggerKind::DataTriggerReject, + top_level_identity: None, + }, + DataTrigger { + data_contract_id: *withdrawals_contract_id, + document_type: withdrawals_contract::document_types::WITHDRAWAL.to_string(), + transition_action: Action::Delete, + data_trigger_kind: DataTriggerKind::DeleteWithdrawal, + top_level_identity: Some(*withdrawals_owner_id), + }, ]; Ok(data_triggers) } diff --git a/packages/rs-dpp/src/data_trigger/mod.rs 
b/packages/rs-dpp/src/data_trigger/mod.rs index 636660e377f..64b55db6996 100644 --- a/packages/rs-dpp/src/data_trigger/mod.rs +++ b/packages/rs-dpp/src/data_trigger/mod.rs @@ -14,6 +14,7 @@ use self::dashpay_data_triggers::create_contact_request_data_trigger; use self::dpns_triggers::create_domain_data_trigger; use self::feature_flags_data_triggers::create_feature_flag_data_trigger; use self::reward_share_data_triggers::create_masternode_reward_shares_data_trigger; +use self::withdrawals_data_triggers::delete_withdrawal_data_trigger; mod data_trigger_execution_context; @@ -22,6 +23,7 @@ pub mod dpns_triggers; pub mod feature_flags_data_triggers; pub mod get_data_triggers_factory; pub mod reward_share_data_triggers; +pub mod withdrawals_data_triggers; mod data_trigger_execution_result; mod reject_data_trigger; @@ -41,6 +43,7 @@ pub enum DataTriggerKind { DataTriggerRewardShare, DataTriggerReject, CrateFeatureFlag, + DeleteWithdrawal, } pub struct DataTrigger { @@ -126,6 +129,9 @@ where create_masternode_reward_shares_data_trigger(document_transition, context, identifier) .await } + DataTriggerKind::DeleteWithdrawal => { + delete_withdrawal_data_trigger(document_transition, context, identifier).await + } } } diff --git a/packages/rs-dpp/src/data_trigger/withdrawals_data_triggers/mod.rs b/packages/rs-dpp/src/data_trigger/withdrawals_data_triggers/mod.rs new file mode 100644 index 00000000000..9517b1122db --- /dev/null +++ b/packages/rs-dpp/src/data_trigger/withdrawals_data_triggers/mod.rs @@ -0,0 +1,175 @@ +use anyhow::{anyhow, bail}; +use serde_json::json; + +use crate::contracts::withdrawals_contract; +use crate::data_trigger::DataTriggerError; +use crate::data_trigger::DataTriggerExecutionContext; +use crate::data_trigger::DataTriggerExecutionResult; +use crate::document::Document; +use crate::get_from_transition; +use crate::prelude::DocumentTransition; +use crate::prelude::Identifier; +use crate::state_repository::StateRepositoryLike; + +pub async fn 
delete_withdrawal_data_trigger<'a, SR>( + document_transition: &DocumentTransition, + context: &DataTriggerExecutionContext<'a, SR>, + _top_level_identity: Option<&Identifier>, +) -> Result +where + SR: StateRepositoryLike, +{ + let mut result = DataTriggerExecutionResult::default(); + + let DocumentTransition::Delete(dt_delete) = document_transition else { + bail!( + "the Document Transition {} isn't 'DELETE'", + get_from_transition!(document_transition, id) + ); + }; + + let withdrawals: Vec = context + .state_repository + .fetch_documents( + &context.data_contract.id, + withdrawals_contract::document_types::WITHDRAWAL, + json!({ + "where" : [ + ["$id", "==", dt_delete.base.id], + ] + }), + context.state_transition_execution_context, + ) + .await?; + + let Some(withdrawal) = withdrawals.get(0) else { + let err = DataTriggerError::DataTriggerConditionError { + data_contract_id: context.data_contract.id, + document_transition_id: dt_delete.base.id, + message: "Withdrawal document was not found".to_string(), + owner_id: Some(*context.owner_id), + document_transition: Some(DocumentTransition::Delete(dt_delete.clone())), + }; + + result.add_error(err.into()); + + return Ok(result); + }; + + let status = withdrawal + .get("status") + .ok_or_else(|| anyhow!("can't get withdrawal status property from the document"))? + .as_u64() + .ok_or_else(|| anyhow!("can't convert withdrawal status to u64"))? 
as u8; + + if status != withdrawals_contract::WithdrawalStatus::COMPLETE as u8 + || status != withdrawals_contract::WithdrawalStatus::EXPIRED as u8 + { + let err = DataTriggerError::DataTriggerConditionError { + data_contract_id: context.data_contract.id, + document_transition_id: dt_delete.base.id, + message: "withdrawal deletion is allowed only for COMPLETE and EXPIRED statuses" + .to_string(), + owner_id: Some(*context.owner_id), + document_transition: Some(DocumentTransition::Delete(dt_delete.clone())), + }; + + result.add_error(err.into()); + + return Ok(result); + } + + Ok(result) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::identity::state_transition::identity_credit_withdrawal_transition::Pooling; + use crate::state_repository::MockStateRepositoryLike; + use crate::state_transition::state_transition_execution_context::StateTransitionExecutionContext; + use crate::system_data_contracts::load_system_data_contract; + use crate::tests::fixtures::{get_data_contract_fixture, get_withdrawal_document_fixture}; + + #[tokio::test] + async fn should_throw_error_if_withdrawal_not_found() { + let transition_execution_context = StateTransitionExecutionContext::default(); + let mut state_repository = MockStateRepositoryLike::new(); + let data_contract = get_data_contract_fixture(None); + let owner_id = data_contract.owner_id().to_owned(); + + state_repository + .expect_fetch_documents::() + .returning(|_, _, _, _| Ok(vec![])); + + let document_transition = DocumentTransition::Delete(Default::default()); + let data_trigger_context = DataTriggerExecutionContext { + data_contract: &data_contract, + owner_id: &owner_id, + state_repository: &state_repository, + state_transition_execution_context: &transition_execution_context, + }; + + let result = + delete_withdrawal_data_trigger(&document_transition, &data_trigger_context, None) + .await + .expect("the execution result should be returned"); + + assert!(!result.is_ok()); + + let error = 
result.get_errors().get(0).unwrap(); + + assert_eq!(error.to_string(), "Withdrawal document was not found"); + } + + #[tokio::test] + async fn should_throw_error_if_withdrawal_has_wrong_status() { + let transition_execution_context = StateTransitionExecutionContext::default(); + let mut state_repository = MockStateRepositoryLike::new(); + let data_contract = + load_system_data_contract(data_contracts::SystemDataContract::Withdrawals) + .expect("to load system data contract"); + let owner_id = data_contract.owner_id().to_owned(); + + let document = get_withdrawal_document_fixture( + &data_contract, + owner_id, + json!({ + "amount": 1000, + "coreFeePerByte": 1, + "pooling": Pooling::Never, + "outputScript": (0..23).collect::>(), + "status": withdrawals_contract::WithdrawalStatus::BROADCASTED, + "transactionIndex": 1, + "transactionSignHeight": 93, + "transactionId": vec![1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], + }), + ); + + state_repository + .expect_fetch_documents::() + .return_once(move |_, _, _, _| Ok(vec![document])); + + let document_transition = DocumentTransition::Delete(Default::default()); + let data_trigger_context = DataTriggerExecutionContext { + data_contract: &data_contract, + owner_id: &owner_id, + state_repository: &state_repository, + state_transition_execution_context: &transition_execution_context, + }; + + let result = + delete_withdrawal_data_trigger(&document_transition, &data_trigger_context, None) + .await + .expect("the execution result should be returned"); + + assert!(!result.is_ok()); + + let error = result.get_errors().get(0).unwrap(); + + assert_eq!( + error.to_string(), + "withdrawal deletion is allowed only for COMPLETE and EXPIRED statuses" + ); + } +} diff --git a/packages/rs-dpp/src/document/document_stub.rs b/packages/rs-dpp/src/document/document_stub.rs index b0cdc4d7f30..a1150343c69 100644 --- a/packages/rs-dpp/src/document/document_stub.rs +++ 
b/packages/rs-dpp/src/document/document_stub.rs @@ -37,8 +37,8 @@ use std::convert::{TryFrom, TryInto}; use std::fmt; use std::io::{BufReader, Read}; -use ciborium::value::Value; -use integer_encoding::VarIntWriter; +use ciborium::value::{Integer, Value}; +use integer_encoding::{VarInt, VarIntReader, VarIntWriter}; use crate::data_contract::{DataContract, DriveContractExt}; use serde::{Deserialize, Serialize}; @@ -54,6 +54,11 @@ use crate::util::deserializer; use crate::util::deserializer::SplitProtocolVersionOutcome; use crate::ProtocolError; +use crate::document::document_transition::INITIAL_REVISION; +use crate::prelude::*; +use crate::util::cbor_value::CborBTreeMapHelper; +use anyhow::{anyhow, bail}; + //todo: rename /// Documents contain the data that goes into data contracts. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] @@ -69,6 +74,10 @@ pub struct DocumentStub { /// The ID of the document's owner. #[serde(rename = "$ownerId")] pub owner_id: [u8; 32], + + /// The document revision. 
+ #[serde(rename = "$revision")] + pub revision: Revision, } impl DocumentStub { @@ -79,6 +88,9 @@ impl DocumentStub { pub fn serialize(&self, document_type: &DocumentType) -> Result, ProtocolError> { let mut buffer: Vec = self.id.as_slice().to_vec(); buffer.extend(self.owner_id.as_slice()); + if document_type.documents_mutable { + buffer.append(&mut self.revision.encode_var_vec()); + } document_type .properties .iter() @@ -113,6 +125,9 @@ impl DocumentStub { let mut buffer: Vec = Vec::try_from(self.id).unwrap(); let mut owner_id = Vec::try_from(self.owner_id).unwrap(); buffer.append(&mut owner_id); + if document_type.documents_mutable { + buffer.append(&mut self.revision.encode_var_vec()); + } document_type .properties .iter() @@ -149,14 +164,27 @@ impl DocumentStub { } let mut id = [0; 32]; buf.read_exact(&mut id).map_err(|_| { - ProtocolError::DecodingError("error reading from serialized document".to_string()) + ProtocolError::DecodingError("error reading id from serialized document".to_string()) })?; let mut owner_id = [0; 32]; buf.read_exact(&mut owner_id).map_err(|_| { - ProtocolError::DecodingError("error reading from serialized document".to_string()) + ProtocolError::DecodingError( + "error reading owner id from serialized document".to_string(), + ) })?; + let revision = if document_type.documents_mutable { + let revision: Revision = buf.read_varint().map_err(|_| { + ProtocolError::DataContractError(DataContractError::CorruptedSerialization( + "error reading varint revision from serialized document", + )) + })?; + revision + } else { + INITIAL_REVISION as Revision + }; + let properties = document_type .properties .iter() @@ -172,6 +200,7 @@ impl DocumentStub { id, properties, owner_id, + revision, }) } @@ -192,7 +221,7 @@ impl DocumentStub { let mut document: BTreeMap = ciborium::de::from_reader(read_document_cbor) .map_err(|_| { ProtocolError::StructureError(StructureError::InvalidCBOR( - "unable to decode contract for document call", + "unable to decode 
document for document call", )) })?; @@ -239,11 +268,16 @@ impl DocumentStub { } .expect("document_id must be 32 bytes"); + let revision: Revision = document + .remove_optional_integer("$revision")? + .unwrap_or(INITIAL_REVISION as Revision); + // dev-note: properties is everything other than the id and owner id Ok(DocumentStub { properties: document, owner_id, id, + revision, }) } @@ -265,6 +299,7 @@ impl DocumentStub { DataContractError::FieldRequirementUnmet("invalid document id"), )); } + let SplitProtocolVersionOutcome { main_message_bytes: read_document_cbor, .. @@ -279,6 +314,8 @@ impl DocumentStub { )) })?; + let revision: Revision = properties.get_integer("$revision")?; + // dev-note: properties is everything other than the id and owner id Ok(DocumentStub { properties, @@ -288,6 +325,7 @@ impl DocumentStub { id: document_id .try_into() .expect("try_into shouldn't fail, document_id must be 32 bytes"), + revision, }) } @@ -383,6 +421,102 @@ impl DocumentStub { })?; self.get_raw_for_document_type(key, document_type, owner_id) } + + /// Temporary helper method to get property in u64 format + /// Imitating JsonValueExt trait + pub fn get_u64(&self, property_name: &str) -> Result { + let property_value = self.properties.get(property_name).ok_or_else(|| { + anyhow!( + "the property '{}' doesn't exist in '{:?}'", + property_name, + self + ) + })?; + + if let Value::Integer(s) = property_value { + return (*s) + .try_into() + .map_err(|_| anyhow!("unable convert {} to u64", property_name)); + } + bail!( + "getting property '{}' failed: {:?} isn't a number", + property_name, + property_value + ); + } + + /// Temporary helper method to get property in u32 format + /// Imitating JsonValueExt trait + pub fn get_u32(&self, property_name: &str) -> Result { + let property_value = + self.properties + .get(property_name) + .ok_or(ProtocolError::DocumentKeyMissing(format!( + "the property '{}' doesn't exist in '{:?}'", + property_name, self + )))?; + + if let 
Value::Integer(s) = property_value { + (*s).try_into() + .map_err(|_| ProtocolError::DecodingError("expected a u32 integer".to_string())) + } else { + Err(ProtocolError::DecodingError( + "expected an integer".to_string(), + )) + } + } + + /// Temporary helper method to get property in bytes format + /// Imitating JsonValueExt trait + pub fn get_bytes(&self, property_name: &str) -> Result, anyhow::Error> { + let property_value = self.properties.get(property_name).ok_or_else(|| { + anyhow!( + "the property '{}' doesn't exist in '{:?}'", + property_name, + self + ) + })?; + + if let Value::Bytes(s) = property_value { + return Ok(s.clone()); + } + bail!( + "getting property '{}' failed: {:?} isn't an array of bytes", + property_name, + property_value + ); + } + + pub fn set_u8(&mut self, property_name: &str, value: u8) { + self.properties.insert( + property_name.to_string(), + Value::Integer(Integer::from(value)), + ); + } + + pub fn set_i64(&mut self, property_name: &str, value: i64) { + self.properties.insert( + property_name.to_string(), + Value::Integer(Integer::from(value)), + ); + } + + pub fn set_bytes(&mut self, property_name: &str, value: Vec) { + self.properties + .insert(property_name.to_string(), Value::Bytes(value)); + } + + pub fn increment_revision(&mut self) -> Result<(), ProtocolError> { + let revision = self.revision; + + let new_revision = revision + .checked_add(1) + .ok_or(ProtocolError::Overflow("overflow when adding 1"))?; + + self.revision = new_revision; + + Ok(()) + } } impl fmt::Display for DocumentStub { diff --git a/packages/rs-dpp/src/document/mod.rs b/packages/rs-dpp/src/document/mod.rs index 81afa957835..5618f2b6f52 100644 --- a/packages/rs-dpp/src/document/mod.rs +++ b/packages/rs-dpp/src/document/mod.rs @@ -260,6 +260,11 @@ impl Document { self.data = data; } + /// Increment document's revision + pub fn increment_revision(&mut self) { + self.revision += 1; + } + /// Get entropy pub fn get_entropy(&self) -> &[u8] { &self.entropy diff 
--git a/packages/rs-dpp/src/document/state_transition/documents_batch_transition/validation/state/validate_documents_batch_transition_state.rs b/packages/rs-dpp/src/document/state_transition/documents_batch_transition/validation/state/validate_documents_batch_transition_state.rs index 989f479e4af..baf2ae669e7 100644 --- a/packages/rs-dpp/src/document/state_transition/documents_batch_transition/validation/state/validate_documents_batch_transition_state.rs +++ b/packages/rs-dpp/src/document/state_transition/documents_batch_transition/validation/state/validate_documents_batch_transition_state.rs @@ -13,7 +13,7 @@ use crate::{ document_transition::{Action, DocumentTransition, DocumentTransitionExt}, Document, DocumentsBatchTransition, }, - prelude::{Identifier, TimestampMillis}, + prelude::{Identifier, Revision, TimestampMillis}, state_repository::StateRepositoryLike, state_transition::{ state_transition_execution_context::StateTransitionExecutionContext, @@ -258,7 +258,7 @@ fn check_revision( result.add_error(ConsensusError::StateError(Box::new( StateError::InvalidDocumentRevisionError { document_id: document_transition.base().id, - current_revision: fetched_document.revision, + current_revision: fetched_document.revision as Revision, }, ))) } diff --git a/packages/rs-dpp/src/errors/abstract_state_error.rs b/packages/rs-dpp/src/errors/abstract_state_error.rs index 780603bf2c2..1754aca942a 100644 --- a/packages/rs-dpp/src/errors/abstract_state_error.rs +++ b/packages/rs-dpp/src/errors/abstract_state_error.rs @@ -44,7 +44,7 @@ pub enum StateError { )] InvalidDocumentRevisionError { document_id: Identifier, - current_revision: u32, + current_revision: Revision, }, #[error("Data Contract {data_contract_id} is already present")] diff --git a/packages/rs-dpp/src/errors/codes.rs b/packages/rs-dpp/src/errors/codes.rs index 102ade95e93..d7db9ded82e 100644 --- a/packages/rs-dpp/src/errors/codes.rs +++ b/packages/rs-dpp/src/errors/codes.rs @@ -44,6 +44,7 @@ impl ErrorWithCode 
for ConsensusError { Self::IdentityInsufficientBalanceError(_) => 4024, Self::InvalidIdentityCreditWithdrawalTransitionCoreFeeError(_) => 4025, Self::InvalidIdentityCreditWithdrawalTransitionOutputScriptError(_) => 4026, + Self::NotImplementedIdentityCreditWithdrawalTransitionPoolingError(_) => 4027, Self::StateError(e) => e.get_code(), Self::BasicError(e) => e.get_code(), diff --git a/packages/rs-dpp/src/errors/consensus/abstract_consensus_error.rs b/packages/rs-dpp/src/errors/consensus/abstract_consensus_error.rs index 83652786526..af8a271d204 100644 --- a/packages/rs-dpp/src/errors/consensus/abstract_consensus_error.rs +++ b/packages/rs-dpp/src/errors/consensus/abstract_consensus_error.rs @@ -25,6 +25,7 @@ use crate::errors::StateError; use super::basic::identity::{ IdentityInsufficientBalanceError, InvalidIdentityCreditWithdrawalTransitionCoreFeeError, InvalidIdentityCreditWithdrawalTransitionOutputScriptError, + NotImplementedIdentityCreditWithdrawalTransitionPoolingError, }; use super::fee::FeeError; use super::signature::SignatureError; @@ -87,6 +88,11 @@ pub enum ConsensusError { InvalidIdentityCreditWithdrawalTransitionOutputScriptError, ), + #[error("{0}")] + NotImplementedIdentityCreditWithdrawalTransitionPoolingError( + NotImplementedIdentityCreditWithdrawalTransitionPoolingError, + ), + #[error(transparent)] StateError(Box), @@ -163,6 +169,7 @@ impl ConsensusError { ConsensusError::IdentityInsufficientBalanceError(_) => 4024, ConsensusError::InvalidIdentityCreditWithdrawalTransitionCoreFeeError(_) => 4025, ConsensusError::InvalidIdentityCreditWithdrawalTransitionOutputScriptError(_) => 4026, + ConsensusError::NotImplementedIdentityCreditWithdrawalTransitionPoolingError(_) => 4027, ConsensusError::StateError(e) => e.get_code(), ConsensusError::BasicError(e) => e.get_code(), diff --git a/packages/rs-dpp/src/errors/consensus/basic/identity/invalid_credit_withdrawal_transition_core_fee_error.rs 
b/packages/rs-dpp/src/errors/consensus/basic/identity/invalid_credit_withdrawal_transition_core_fee_error.rs index 9e206b3ca38..2255397688d 100644 --- a/packages/rs-dpp/src/errors/consensus/basic/identity/invalid_credit_withdrawal_transition_core_fee_error.rs +++ b/packages/rs-dpp/src/errors/consensus/basic/identity/invalid_credit_withdrawal_transition_core_fee_error.rs @@ -3,18 +3,18 @@ use thiserror::Error; use crate::consensus::ConsensusError; #[derive(Error, Debug, Clone, PartialEq, Eq)] -#[error("Core fee {core_fee:?} must be part of fibonacci sequence")] +#[error("Core fee per byte {core_fee_per_byte:?} must be part of fibonacci sequence")] pub struct InvalidIdentityCreditWithdrawalTransitionCoreFeeError { - core_fee: u32, + core_fee_per_byte: u32, } impl InvalidIdentityCreditWithdrawalTransitionCoreFeeError { - pub fn new(core_fee: u32) -> Self { - Self { core_fee } + pub fn new(core_fee_per_byte: u32) -> Self { + Self { core_fee_per_byte } } - pub fn core_fee(&self) -> u32 { - self.core_fee + pub fn core_fee_per_byte(&self) -> u32 { + self.core_fee_per_byte } } diff --git a/packages/rs-dpp/src/errors/consensus/basic/identity/invalid_credit_withdrawal_transition_pooling_error.rs b/packages/rs-dpp/src/errors/consensus/basic/identity/invalid_credit_withdrawal_transition_pooling_error.rs new file mode 100644 index 00000000000..8430c8f627e --- /dev/null +++ b/packages/rs-dpp/src/errors/consensus/basic/identity/invalid_credit_withdrawal_transition_pooling_error.rs @@ -0,0 +1,27 @@ +use thiserror::Error; + +use crate::consensus::ConsensusError; + +#[derive(Error, Debug, Clone, PartialEq, Eq)] +#[error( + "pooling {pooling:?} should be equal to 0. 
Other pooling mechanism are not implemented yet" +)] +pub struct NotImplementedIdentityCreditWithdrawalTransitionPoolingError { + pooling: u8, +} + +impl NotImplementedIdentityCreditWithdrawalTransitionPoolingError { + pub fn new(pooling: u8) -> Self { + Self { pooling } + } + + pub fn pooling(&self) -> u8 { + self.pooling + } +} + +impl From for ConsensusError { + fn from(err: NotImplementedIdentityCreditWithdrawalTransitionPoolingError) -> Self { + Self::NotImplementedIdentityCreditWithdrawalTransitionPoolingError(err) + } +} diff --git a/packages/rs-dpp/src/errors/consensus/basic/identity/mod.rs b/packages/rs-dpp/src/errors/consensus/basic/identity/mod.rs index 80d65e7a8ac..2275fb0a624 100644 --- a/packages/rs-dpp/src/errors/consensus/basic/identity/mod.rs +++ b/packages/rs-dpp/src/errors/consensus/basic/identity/mod.rs @@ -10,6 +10,7 @@ pub use invalid_asset_lock_proof_transaction_height_error::*; pub use invalid_asset_lock_transaction_output_return_size::*; pub use invalid_credit_withdrawal_transition_core_fee_error::*; pub use invalid_credit_withdrawal_transition_output_script_error::*; +pub use invalid_credit_withdrawal_transition_pooling_error::*; pub use invalid_identity_asset_lock_transaction_error::*; pub use invalid_identity_asset_lock_transaction_output_error::*; pub use invalid_identity_public_key_data_error::*; @@ -30,6 +31,7 @@ mod invalid_asset_lock_proof_transaction_height_error; mod invalid_asset_lock_transaction_output_return_size; mod invalid_credit_withdrawal_transition_core_fee_error; mod invalid_credit_withdrawal_transition_output_script_error; +mod invalid_credit_withdrawal_transition_pooling_error; mod invalid_identity_asset_lock_transaction_error; mod invalid_identity_asset_lock_transaction_output_error; mod invalid_identity_public_key_data_error; diff --git a/packages/rs-dpp/src/errors/errors.rs b/packages/rs-dpp/src/errors/errors.rs index e257d280497..11d82bc8bcf 100644 --- a/packages/rs-dpp/src/errors/errors.rs +++ 
b/packages/rs-dpp/src/errors/errors.rs @@ -117,6 +117,14 @@ pub enum ProtocolError { #[error("Identity is not present")] IdentityNotPresentError { id: Identifier }, + + /// Error + #[error("overflow error: {0}")] + Overflow(&'static str), + + /// Error + #[error("missing key: {0}")] + DocumentKeyMissing(String), } impl From for ProtocolError { diff --git a/packages/rs-dpp/src/identity/identity.rs b/packages/rs-dpp/src/identity/identity.rs index 6f53e6d19f8..62b6be81b0a 100644 --- a/packages/rs-dpp/src/identity/identity.rs +++ b/packages/rs-dpp/src/identity/identity.rs @@ -162,6 +162,17 @@ impl Identity { self.revision } + /// Increment revision + pub fn increment_revision(&mut self) -> Result<(), ProtocolError> { + let result = self.revision.checked_add(1).ok_or(ProtocolError::Generic( + "identity revision is at max level".to_string(), + ))?; + + self.revision = result; + + Ok(()) + } + /// Get metadata pub fn get_metadata(&self) -> Option<&Metadata> { self.metadata.as_ref() diff --git a/packages/rs-dpp/src/identity/state_transition/identity_credit_withdrawal_transition/apply_identity_credit_withdrawal_transition_factory.rs b/packages/rs-dpp/src/identity/state_transition/identity_credit_withdrawal_transition/apply_identity_credit_withdrawal_transition_factory.rs index 58048875de4..b17f11f0505 100644 --- a/packages/rs-dpp/src/identity/state_transition/identity_credit_withdrawal_transition/apply_identity_credit_withdrawal_transition_factory.rs +++ b/packages/rs-dpp/src/identity/state_transition/identity_credit_withdrawal_transition/apply_identity_credit_withdrawal_transition_factory.rs @@ -1,20 +1,22 @@ use anyhow::{anyhow, Result}; -use dashcore::{ - blockdata::transaction::special_transaction::asset_unlock::unqualified_asset_unlock::{ - AssetUnlockBasePayload, AssetUnlockBaseTransactionInfo, - }, - consensus::Encodable, - Script, TxOut, -}; +use dashcore::{consensus, BlockHeader}; use lazy_static::__Deref; +use std::convert::TryInto; + +use serde_json::json; use 
crate::{ - identity::convert_credits_to_satoshi, state_repository::StateRepositoryLike, - state_transition::StateTransitionLike, + contracts::withdrawals_contract, data_contract::DataContract, document::generate_document_id, + document::Document, identity::state_transition::identity_credit_withdrawal_transition::Pooling, + state_repository::StateRepositoryLike, state_transition::StateTransitionLike, + util::entropy_generator::generate, }; use super::IdentityCreditWithdrawalTransition; +const PLATFORM_BLOCK_HEADER_TIME_PROPERTY: &str = "time"; +const PLATFORM_BLOCK_HEADER_TIME_SECONDS_PROPERTY: &str = "seconds"; + pub struct ApplyIdentityCreditWithdrawalTransition where SR: StateRepositoryLike, @@ -34,37 +36,90 @@ where &self, state_transition: &IdentityCreditWithdrawalTransition, ) -> Result<()> { - let latest_withdrawal_index = self + let data_contract_id = withdrawals_contract::CONTRACT_ID.deref(); + + let maybe_withdrawals_data_contract: Option = self .state_repository - .fetch_latest_withdrawal_transaction_index() + .fetch_data_contract(data_contract_id, state_transition.get_execution_context()) + .await? 
+ .map(TryInto::try_into) + .transpose() + .map_err(Into::into)?; + + let withdrawals_data_contract = maybe_withdrawals_data_contract + .ok_or_else(|| anyhow!("Withdrawals data contract not found"))?; + + let latest_platform_block_header_bytes: Vec = self + .state_repository + .fetch_latest_platform_block_header() .await?; - let output_script: Script = state_transition.output_script.deref().clone(); + let latest_platform_block_header: BlockHeader = + consensus::deserialize(&latest_platform_block_header_bytes)?; - let tx_out = TxOut { - value: convert_credits_to_satoshi(state_transition.amount), - script_pubkey: output_script, - }; + let document_type = String::from(withdrawals_contract::document_types::WITHDRAWAL); + let document_created_at_millis: i64 = latest_platform_block_header.time as i64 * 1000i64; - let withdrawal_transaction = AssetUnlockBaseTransactionInfo { - version: 1, - lock_time: 0, - output: vec![tx_out], - base_payload: AssetUnlockBasePayload { - version: 1, - index: latest_withdrawal_index + 1, - fee: state_transition.core_fee, - }, - }; + let document_data = json!({ + withdrawals_contract::property_names::AMOUNT: state_transition.amount, + withdrawals_contract::property_names::CORE_FEE_PER_BYTE: state_transition.core_fee_per_byte, + withdrawals_contract::property_names::POOLING: Pooling::Never, + withdrawals_contract::property_names::OUTPUT_SCRIPT: state_transition.output_script.as_bytes(), + withdrawals_contract::property_names::STATUS: withdrawals_contract::WithdrawalStatus::QUEUED, + }); - let mut transaction_buffer: Vec = vec![]; + let mut document_id; - withdrawal_transaction - .consensus_encode(&mut transaction_buffer) - .map_err(|e| anyhow!(e))?; + loop { + let document_entropy = generate()?; + + document_id = generate_document_id::generate_document_id( + data_contract_id, + &state_transition.identity_id, + &document_type, + &document_entropy, + ); + + let documents: Vec = self + .state_repository + .fetch_documents( + 
withdrawals_contract::CONTRACT_ID.deref(), + withdrawals_contract::document_types::WITHDRAWAL, + json!({ + "where": [ + ["$id", "==", document_id], + ], + }), + &state_transition.execution_context, + ) + .await?; + + if documents.is_empty() { + break; + } + } + + // TODO: use DocumentFactory once it is complete + let withdrawal_document = Document { + protocol_version: state_transition.protocol_version, + id: document_id, + document_type, + revision: 0, + data_contract_id: *data_contract_id, + owner_id: state_transition.identity_id, + created_at: Some(document_created_at_millis), + updated_at: Some(document_created_at_millis), + data: document_data, + data_contract: withdrawals_data_contract, + metadata: None, + entropy: [0; 32], + }; self.state_repository - .enqueue_withdrawal_transaction(latest_withdrawal_index, transaction_buffer) + .create_document( + &withdrawal_document, + state_transition.get_execution_context(), + ) .await?; // TODO: we need to be able to batch state repository operations diff --git a/packages/rs-dpp/src/identity/state_transition/identity_credit_withdrawal_transition/mod.rs b/packages/rs-dpp/src/identity/state_transition/identity_credit_withdrawal_transition/mod.rs index 96640ebc350..04972f7f0b8 100644 --- a/packages/rs-dpp/src/identity/state_transition/identity_credit_withdrawal_transition/mod.rs +++ b/packages/rs-dpp/src/identity/state_transition/identity_credit_withdrawal_transition/mod.rs @@ -6,7 +6,7 @@ use serde_repr::{Deserialize_repr, Serialize_repr}; use crate::version::LATEST_VERSION; use crate::{ identity::{core_script::CoreScript, KeyID}, - prelude::Identifier, + prelude::{Identifier, Revision}, state_transition::{ state_transition_execution_context::StateTransitionExecutionContext, StateTransitionConvert, StateTransitionIdentitySigned, StateTransitionLike, @@ -49,9 +49,10 @@ pub struct IdentityCreditWithdrawalTransition { pub transition_type: StateTransitionType, pub identity_id: Identifier, pub amount: u64, - pub core_fee: 
u32, + pub core_fee_per_byte: u32, pub pooling: Pooling, pub output_script: CoreScript, + pub revision: Revision, pub signature_public_key_id: KeyID, pub signature: Vec, #[serde(skip)] @@ -65,9 +66,10 @@ impl std::default::Default for IdentityCreditWithdrawalTransition { transition_type: StateTransitionType::IdentityCreditWithdrawal, identity_id: Default::default(), amount: Default::default(), - core_fee: Default::default(), + core_fee_per_byte: Default::default(), pooling: Default::default(), output_script: Default::default(), + revision: Default::default(), signature_public_key_id: Default::default(), signature: Default::default(), execution_context: Default::default(), @@ -113,6 +115,14 @@ impl IdentityCreditWithdrawalTransition { pub fn get_modified_data_ids(&self) -> Vec<&Identifier> { vec![&self.identity_id] } + + pub fn set_revision(&mut self, revision: Revision) { + self.revision = revision; + } + + pub fn get_revision(&self) -> Revision { + self.revision + } } impl StateTransitionIdentitySigned for IdentityCreditWithdrawalTransition { diff --git a/packages/rs-dpp/src/identity/state_transition/identity_credit_withdrawal_transition/validation/basic/validate_identity_credit_withdrawal_transition_basic.rs b/packages/rs-dpp/src/identity/state_transition/identity_credit_withdrawal_transition/validation/basic/validate_identity_credit_withdrawal_transition_basic.rs index a5d77c043c6..34ccf4c9525 100644 --- a/packages/rs-dpp/src/identity/state_transition/identity_credit_withdrawal_transition/validation/basic/validate_identity_credit_withdrawal_transition_basic.rs +++ b/packages/rs-dpp/src/identity/state_transition/identity_credit_withdrawal_transition/validation/basic/validate_identity_credit_withdrawal_transition_basic.rs @@ -7,9 +7,14 @@ use crate::{ consensus::basic::identity::{ InvalidIdentityCreditWithdrawalTransitionCoreFeeError, InvalidIdentityCreditWithdrawalTransitionOutputScriptError, + NotImplementedIdentityCreditWithdrawalTransitionPoolingError, }, + 
contracts::withdrawals_contract, identity::core_script::CoreScript, - util::{is_fibonacci_number::is_fibonacci_number, protocol_data::get_protocol_version}, + util::{ + is_fibonacci_number::is_fibonacci_number, json_value::JsonValueExt, + protocol_data::get_protocol_version, + }, validation::{JsonSchemaValidator, ValidationResult}, version::ProtocolVersionValidator, DashPlatformProtocolInitError, NonConsensusError, SerdeParsingError, @@ -71,29 +76,38 @@ impl IdentityCreditWithdrawalTransitionBasicValidator { return Ok(result); } + // validate pooling is always equals to 0 + let pooling = transition_json.get_u8(withdrawals_contract::property_names::POOLING)?; + + if pooling > 0 { + result.add_error( + NotImplementedIdentityCreditWithdrawalTransitionPoolingError::new(pooling), + ); + + return Ok(result); + } + // validate core_fee is in fibonacci sequence - let core_fee = transition_json - .get("coreFee") - .ok_or_else(|| { - SerdeParsingError::new("Expected credit withdrawal transition to have coreFee") - })? 
- .as_u64() - .ok_or_else(|| SerdeParsingError::new("Expected coreFee to be a uint"))?; + let core_fee_per_byte = + transition_json.get_u32(withdrawals_contract::property_names::CORE_FEE_PER_BYTE)?; - if !is_fibonacci_number(core_fee) { + if !is_fibonacci_number(core_fee_per_byte) { result.add_error(InvalidIdentityCreditWithdrawalTransitionCoreFeeError::new( - core_fee as u32, + core_fee_per_byte, )); - } - if !result.is_valid() { return Ok(result); } // validate output_script types - let output_script_value = transition_json.get("outputScript").ok_or_else(|| { - SerdeParsingError::new("Expected credit withdrawal transition to have outputScript") - })?; + let output_script_value = transition_json + .get(withdrawals_contract::property_names::OUTPUT_SCRIPT) + .ok_or_else(|| { + SerdeParsingError::new(format!( + "Expected credit withdrawal transition to have {} property", + withdrawals_contract::property_names::OUTPUT_SCRIPT + )) + })?; let output_script_bytes: Vec = serde_json::from_value(output_script_value.clone())?; diff --git a/packages/rs-dpp/src/identity/state_transition/identity_credit_withdrawal_transition/validation/state/validate_identity_credit_withdrawal_transition_state.rs b/packages/rs-dpp/src/identity/state_transition/identity_credit_withdrawal_transition/validation/state/validate_identity_credit_withdrawal_transition_state.rs index 35f7e906b80..5d0132b7c11 100644 --- a/packages/rs-dpp/src/identity/state_transition/identity_credit_withdrawal_transition/validation/state/validate_identity_credit_withdrawal_transition_state.rs +++ b/packages/rs-dpp/src/identity/state_transition/identity_credit_withdrawal_transition/validation/state/validate_identity_credit_withdrawal_transition_state.rs @@ -9,7 +9,7 @@ use crate::{ state_repository::StateRepositoryLike, state_transition::StateTransitionLike, validation::ValidationResult, - NonConsensusError, + NonConsensusError, StateError, }; pub struct IdentityCreditWithdrawalTransitionValidator @@ -33,6 +33,7 @@ where 
) -> Result, NonConsensusError> { let mut result: ValidationResult<()> = ValidationResult::default(); + // TODO: Use fetchIdentityBalance let maybe_existing_identity = self .state_repository .fetch_identity( @@ -45,17 +46,14 @@ where .map_err(Into::into) .map_err(|e| NonConsensusError::StateRepositoryFetchError(e.to_string()))?; - let existing_identity = match maybe_existing_identity { - None => { - let err = BasicError::IdentityNotFoundError { - identity_id: state_transition.identity_id, - }; + let Some(existing_identity) = maybe_existing_identity else { + let err = BasicError::IdentityNotFoundError { + identity_id: state_transition.identity_id, + }; - result.add_error(err); + result.add_error(err); - return Ok(result); - } - Some(identity) => identity, + return Ok(result); }; if existing_identity.get_balance() < state_transition.amount { @@ -69,6 +67,16 @@ where return Ok(result); } + // Check revision + if existing_identity.get_revision() != (state_transition.get_revision() - 1) { + result.add_error(StateError::InvalidIdentityRevisionError { + identity_id: existing_identity.get_id().to_owned(), + current_revision: existing_identity.get_revision(), + }); + + return Ok(result); + } + Ok(result) } } diff --git a/packages/rs-dpp/src/lib.rs b/packages/rs-dpp/src/lib.rs index 14f5cf2c583..f9ff069dcbf 100644 --- a/packages/rs-dpp/src/lib.rs +++ b/packages/rs-dpp/src/lib.rs @@ -12,7 +12,7 @@ pub use convertible::Convertible; pub use dash_platform_protocol::DashPlatformProtocol; pub use errors::*; -mod contracts; +pub mod contracts; pub mod data_contract; mod convertible; @@ -38,10 +38,11 @@ pub mod block_time_window; pub mod mocks; mod bls; -pub mod system_data_contracts; -#[cfg(test)] -mod tests; +#[cfg(feature = "fixtures-and-mocks")] +pub mod tests; + +pub mod system_data_contracts; pub use bls::*; pub mod prelude { diff --git a/packages/rs-dpp/src/schema/identity/stateTransition/identityCreditWithdrawal.json 
b/packages/rs-dpp/src/schema/identity/stateTransition/identityCreditWithdrawal.json index 69692a8a69e..c689d54afd9 100644 --- a/packages/rs-dpp/src/schema/identity/stateTransition/identityCreditWithdrawal.json +++ b/packages/rs-dpp/src/schema/identity/stateTransition/identityCreditWithdrawal.json @@ -19,16 +19,22 @@ }, "amount": { "type": "integer", + "description": "The amount to be withdrawn", "minimum": 1000 }, - "coreFee": { + "coreFeePerByte": { "type": "integer", - "minimum": 1 + "description": "This is the fee that you are willing to spend for this transaction in Duffs/Byte", + "minimum": 1, + "maximum": 4294967295 }, "pooling": { "type": "integer", + "description": "This indicated the level at which Platform should try to pool this transaction", "enum": [ - 0 + 0, + 1, + 2 ] }, "outputScript": { @@ -46,6 +52,11 @@ "signaturePublicKeyId": { "type": "integer", "minimum": 0 + }, + "revision": { + "type": "integer", + "minimum": 0, + "description": "Target identity revision" } }, "additionalProperties": false, @@ -54,10 +65,11 @@ "type", "identityId", "amount", - "coreFee", + "coreFeePerByte", "pooling", "outputScript", "signature", - "signaturePublicKeyId" + "signaturePublicKeyId", + "revision" ] -} +} \ No newline at end of file diff --git a/packages/rs-dpp/src/state_repository.rs b/packages/rs-dpp/src/state_repository.rs index 72695c24fbb..0b83742e51b 100644 --- a/packages/rs-dpp/src/state_repository.rs +++ b/packages/rs-dpp/src/state_repository.rs @@ -1,7 +1,7 @@ use anyhow::Result as AnyResult; use async_trait::async_trait; use dashcore::InstantLock; -#[cfg(any(test, feature = "mocks"))] +#[cfg(feature = "fixtures-and-mocks")] use mockall::{automock, predicate::*}; use serde_json::Value as JsonValue; use std::convert::{Infallible, TryInto}; @@ -25,7 +25,7 @@ pub struct FetchTransactionResponse { } // Let StateRepositoryLike mock return DataContracts instead of bytes to simplify things a bit. 
-#[cfg_attr(any(test, feature="mocks"), automock( +#[cfg_attr(any(test, feature="fixtures-and-mocks"), automock( type ConversionError=Infallible; type FetchDataContract=DataContract; type FetchIdentity=Identity; @@ -213,11 +213,4 @@ pub trait StateRepositoryLike: Sync { // Get latest (in a queue) withdrawal transaction index async fn fetch_latest_platform_core_chain_locked_height(&self) -> AnyResult>; - - // Enqueue withdrawal transaction - async fn enqueue_withdrawal_transaction( - &self, - index: u64, - transaction_bytes: Vec, - ) -> AnyResult<()>; } diff --git a/packages/rs-dpp/src/tests/fixtures/get_documents_fixture.rs b/packages/rs-dpp/src/tests/fixtures/get_documents_fixture.rs index e0149958404..42d54b33399 100644 --- a/packages/rs-dpp/src/tests/fixtures/get_documents_fixture.rs +++ b/packages/rs-dpp/src/tests/fixtures/get_documents_fixture.rs @@ -1,8 +1,9 @@ use std::sync::Arc; -use serde_json::json; +use serde_json::{json, Value}; use crate::{ + contracts::withdrawals_contract, document::{ document_factory::DocumentFactory, fetch_and_validate_data_contract::DataContractFetcherAndValidator, @@ -109,6 +110,27 @@ fn get_documents( Ok(documents) } +pub fn get_withdrawal_document_fixture( + data_contract: &DataContract, + owner_id: Identifier, + data: Value, +) -> Document { + let factory = DocumentFactory::new( + LATEST_VERSION, + get_document_validator_fixture(), + DataContractFetcherAndValidator::new(Arc::new(MockStateRepositoryLike::new())), + ); + + factory + .create( + data_contract.clone(), + owner_id, + withdrawals_contract::document_types::WITHDRAWAL.to_string(), + data, + ) + .unwrap() +} + fn get_random_10_bytes() -> Vec { let mut buffer = [0u8; 10]; let _ = getrandom::getrandom(&mut buffer); diff --git a/packages/rs-dpp/src/tests/fixtures/get_dpns_data_contract.rs b/packages/rs-dpp/src/tests/fixtures/get_dpns_data_contract.rs index 84ca1bc496f..07bcc5bcfd8 100644 --- a/packages/rs-dpp/src/tests/fixtures/get_dpns_data_contract.rs +++ 
b/packages/rs-dpp/src/tests/fixtures/get_dpns_data_contract.rs @@ -33,9 +33,7 @@ pub fn get_dpns_data_contract_fixture(owner_id: Option) -> DataContr // TODO the pattern is invalid as it's a re2 document_schemas["domain"]["properties"]["normalizedParentDomainName"]["pattern"] = json!(".*"); - let mut data_contract = factory + factory .create(owner_id, document_schemas, Some(defs)) - .expect("data in fixture should be correct"); - - data_contract + .expect("data in fixture should be correct") } diff --git a/packages/rs-dpp/src/tests/fixtures/identity_credit_withdrawal_transition_fixture.rs b/packages/rs-dpp/src/tests/fixtures/identity_credit_withdrawal_transition_fixture.rs index 7e252a8a51b..e23914ab606 100644 --- a/packages/rs-dpp/src/tests/fixtures/identity_credit_withdrawal_transition_fixture.rs +++ b/packages/rs-dpp/src/tests/fixtures/identity_credit_withdrawal_transition_fixture.rs @@ -14,11 +14,12 @@ pub fn identity_credit_withdrawal_transition_fixture_raw_object() -> Value { "type": StateTransitionType::IdentityCreditWithdrawal, "identityId": vec![1_u8; 32], "amount": 1042, - "coreFee": 2, + "coreFeePerByte": 3, "pooling": Pooling::Never, "outputScript": Script::new_p2pkh(&PubkeyHash::from_hex("0000000000000000000000000000000000000000").unwrap()).to_bytes(), "signature": vec![0_u8; 65], "signaturePublicKeyId": 0, + "revision": 1, }) } @@ -28,10 +29,11 @@ pub fn identity_credit_withdrawal_transition_fixture_json() -> Value { "type": StateTransitionType::IdentityCreditWithdrawal, "identityId": encode(&[1_u8; 32], Encoding::Base58), "amount": 1042, - "coreFee": 2, + "coreFeePerByte": 3, "pooling": Pooling::Never, "outputScript": encode(&Script::new_p2pkh(&PubkeyHash::from_hex("0000000000000000000000000000000000000000").unwrap()).to_bytes(), Encoding::Base64), "signature": encode(&[0_u8; 65], Encoding::Base64), "signaturePublicKeyId": 0, + "revision": 1, }) } diff --git a/packages/rs-dpp/src/tests/fixtures/public_keys_validator_mock.rs 
b/packages/rs-dpp/src/tests/fixtures/public_keys_validator_mock.rs index 7de8d48f6df..373969e59bd 100644 --- a/packages/rs-dpp/src/tests/fixtures/public_keys_validator_mock.rs +++ b/packages/rs-dpp/src/tests/fixtures/public_keys_validator_mock.rs @@ -6,7 +6,7 @@ use crate::identity::validation::TPublicKeysValidator; use crate::validation::ValidationResult; use crate::NonConsensusError; -#[cfg(test)] +#[cfg(feature = "fixtures-and-mocks")] pub struct PublicKeysValidatorMock { returns: Mutex, NonConsensusError>>, returns_fn: diff --git a/packages/rs-dpp/src/tests/identity/state_transition/identity_credit_withdrawal_transition/apply_identity_credit_withdrawal_transition_factory_spec.rs b/packages/rs-dpp/src/tests/identity/state_transition/identity_credit_withdrawal_transition/apply_identity_credit_withdrawal_transition_factory_spec.rs index 2c579347ac8..ecbef4c00f2 100644 --- a/packages/rs-dpp/src/tests/identity/state_transition/identity_credit_withdrawal_transition/apply_identity_credit_withdrawal_transition_factory_spec.rs +++ b/packages/rs-dpp/src/tests/identity/state_transition/identity_credit_withdrawal_transition/apply_identity_credit_withdrawal_transition_factory_spec.rs @@ -1,17 +1,23 @@ #[cfg(test)] mod apply_identity_credit_withdrawal_transition_factory { + use dashcore::{consensus, BlockHeader}; + use serde_json::json; + use crate::{ + contracts::withdrawals_contract, + document::Document, identity::state_transition::identity_credit_withdrawal_transition::{ apply_identity_credit_withdrawal_transition_factory::ApplyIdentityCreditWithdrawalTransition, - IdentityCreditWithdrawalTransition, + IdentityCreditWithdrawalTransition, Pooling, }, state_repository::MockStateRepositoryLike, + tests::fixtures::get_data_contract_fixture, }; use mockall::predicate::{always, eq}; use std::default::Default; #[tokio::test] - async fn should_call_state_repository_methods() { + async fn should_fail_if_data_contract_was_not_found() { let mut state_repository = 
MockStateRepositoryLike::default(); let state_transition = IdentityCreditWithdrawalTransition { @@ -19,34 +25,95 @@ mod apply_identity_credit_withdrawal_transition_factory { ..Default::default() }; - let IdentityCreditWithdrawalTransition { - identity_id, - amount, - .. - } = state_transition.clone(); + state_repository + .expect_fetch_data_contract() + .times(1) + .returning(|_, _| anyhow::Ok(None)); + + let applier = ApplyIdentityCreditWithdrawalTransition::new(state_repository); + + match applier + .apply_identity_credit_withdrawal_transition(&state_transition) + .await + { + Ok(_) => panic!("should not be able to apply state transition"), + Err(e) => { + assert_eq!(e.to_string(), "Withdrawals data contract not found"); + } + }; + } + + #[tokio::test] + async fn should_create_withdrawal_and_reduce_balance() { + let block_time_seconds = 1675709306; + + let state_transition = IdentityCreditWithdrawalTransition { + amount: 10, + ..Default::default() + }; + + let mut state_repository = MockStateRepositoryLike::default(); state_repository - .expect_fetch_latest_withdrawal_transaction_index() + .expect_fetch_documents::() + .returning(|_, _, _, _| anyhow::Ok(vec![])); + + state_repository + .expect_fetch_data_contract() .times(1) - // trying to use values other than default to check they are actually set - .returning(|| anyhow::Ok(42)); + .returning(|_, _| anyhow::Ok(Some(get_data_contract_fixture(None)))); state_repository - .expect_enqueue_withdrawal_transaction() - .withf(|index, _| *index == 42) + .expect_fetch_latest_platform_block_header() + .times(1) + .returning(move || { + let header = BlockHeader { + time: block_time_seconds, + version: 1, + prev_blockhash: Default::default(), + merkle_root: Default::default(), + bits: Default::default(), + nonce: Default::default(), + }; + + anyhow::Ok(consensus::serialize(&header)) + }); + + state_repository + .expect_create_document() + .times(1) + .withf(move |doc, _| { + let created_at_match = doc.created_at == 
Some(block_time_seconds as i64 * 1000); + let updated_at_match = doc.created_at == Some(block_time_seconds as i64 * 1000); + + let document_data_match = doc.data + == json!({ + "amount": 10, + "coreFeePerByte": 0, + "pooling": Pooling::Never, + "outputScript": [], + "status": withdrawals_contract::WithdrawalStatus::QUEUED, + }); + + created_at_match && updated_at_match && document_data_match + }) .returning(|_, _| anyhow::Ok(())); state_repository .expect_remove_from_identity_balance() .times(1) // TODO: we need to assert execution context as well - .with(eq(identity_id), eq(amount), always()) + .with( + eq(state_transition.identity_id), + eq(state_transition.amount), + always(), + ) .returning(|_, _, _| anyhow::Ok(())); state_repository .expect_remove_from_system_credits() .times(1) - .with(eq(amount), always()) + .with(eq(state_transition.amount), always()) .returning(|_, _| anyhow::Ok(())); let applier = ApplyIdentityCreditWithdrawalTransition::new(state_repository); diff --git a/packages/rs-dpp/src/tests/identity/state_transition/identity_credit_withdrawal_transition/validation/basic/validate_identity_credit_withdrawal_transition_basic_spec.rs b/packages/rs-dpp/src/tests/identity/state_transition/identity_credit_withdrawal_transition/validation/basic/validate_identity_credit_withdrawal_transition_basic_spec.rs index 8882179de60..253ad859f84 100644 --- a/packages/rs-dpp/src/tests/identity/state_transition/identity_credit_withdrawal_transition/validation/basic/validate_identity_credit_withdrawal_transition_basic_spec.rs +++ b/packages/rs-dpp/src/tests/identity/state_transition/identity_credit_withdrawal_transition/validation/basic/validate_identity_credit_withdrawal_transition_basic_spec.rs @@ -282,14 +282,14 @@ mod validate_identity_credit_withdrawal_transition_basic_factory { } } - mod core_fee { + mod core_fee_per_byte { use super::*; #[tokio::test] async fn should_be_present() { let (mut raw_state_transition, validator) = setup_test(); - 
raw_state_transition.remove_key("coreFee"); + raw_state_transition.remove_key("coreFeePerByte"); let result = validator.validate(&raw_state_transition).await.unwrap(); @@ -302,7 +302,7 @@ mod validate_identity_credit_withdrawal_transition_basic_factory { match error.kind() { ValidationErrorKind::Required { property } => { - assert_eq!(property.to_string(), "\"coreFee\""); + assert_eq!(property.to_string(), "\"coreFeePerByte\""); } _ => panic!("Expected to be missing property"), } @@ -312,7 +312,7 @@ mod validate_identity_credit_withdrawal_transition_basic_factory { async fn should_be_integer() { let (mut raw_state_transition, validator) = setup_test(); - raw_state_transition.set_key_value("coreFee", "1"); + raw_state_transition.set_key_value("coreFeePerByte", "1"); let result = validator.validate(&raw_state_transition).await.unwrap(); @@ -320,7 +320,7 @@ mod validate_identity_credit_withdrawal_transition_basic_factory { let error = errors.first().unwrap(); - assert_eq!(error.instance_path().to_string(), "/coreFee"); + assert_eq!(error.instance_path().to_string(), "/coreFeePerByte"); assert_eq!(error.keyword().unwrap(), "type"); } @@ -328,7 +328,7 @@ mod validate_identity_credit_withdrawal_transition_basic_factory { pub async fn should_be_not_less_than_1() { let (mut raw_state_transition, validator) = setup_test(); - raw_state_transition.set_key_value("coreFee", -1); + raw_state_transition.set_key_value("coreFeePerByte", -1); let result = validator.validate(&raw_state_transition).await.unwrap(); @@ -336,15 +336,31 @@ mod validate_identity_credit_withdrawal_transition_basic_factory { let error = errors.first().unwrap(); - assert_eq!(error.instance_path().to_string(), "/coreFee"); + assert_eq!(error.instance_path().to_string(), "/coreFeePerByte"); assert_eq!(error.keyword().unwrap(), "minimum"); } + #[tokio::test] + pub async fn should_be_not_more_than_u32_max() { + let (mut raw_state_transition, validator) = setup_test(); + + 
raw_state_transition.set_key_value("coreFeePerByte", u32::MAX as u64 + 1u64); + + let result = validator.validate(&raw_state_transition).await.unwrap(); + + let errors = assert_consensus_errors!(result, ConsensusError::JsonSchemaError, 1); + + let error = errors.first().unwrap(); + + assert_eq!(error.instance_path().to_string(), "/coreFeePerByte"); + assert_eq!(error.keyword().unwrap(), "maximum"); + } + #[tokio::test] pub async fn should_be_in_a_fibonacci_sequence() { let (mut raw_state_transition, validator) = setup_test(); - raw_state_transition.set_key_value("coreFee", 6); + raw_state_transition.set_key_value("coreFeePerByte", 6); let result = validator.validate(&raw_state_transition).await.unwrap(); @@ -356,7 +372,7 @@ mod validate_identity_credit_withdrawal_transition_basic_factory { let error = errors.first().unwrap(); - assert_eq!(error.core_fee(), 6); + assert_eq!(error.core_fee_per_byte(), 6); } } @@ -417,6 +433,25 @@ mod validate_identity_credit_withdrawal_transition_basic_factory { assert_eq!(error.instance_path().to_string(), "/pooling"); assert_eq!(error.keyword().unwrap(), "enum"); } + + #[tokio::test] + async fn should_constraint_variant_to_0() { + let (mut raw_state_transition, validator) = setup_test(); + + raw_state_transition.set_key_value("pooling", 2); + + let result = validator.validate(&raw_state_transition).await.unwrap(); + + let errors = assert_consensus_errors!( + result, + ConsensusError::NotImplementedIdentityCreditWithdrawalTransitionPoolingError, + 1 + ); + + let error = errors.first().unwrap(); + + assert_eq!(error.pooling(), 2); + } } mod output_script { @@ -653,6 +688,6 @@ mod validate_identity_credit_withdrawal_transition_basic_factory { let result = validator.validate(&raw_state_transition).await.unwrap(); - assert_eq!(result.is_valid(), true); + assert!(result.is_valid()); } } diff --git 
a/packages/rs-dpp/src/tests/identity/state_transition/identity_credit_withdrawal_transition/validation/state/validate_identity_credit_withdrawal_state_spec.rs b/packages/rs-dpp/src/tests/identity/state_transition/identity_credit_withdrawal_transition/validation/state/validate_identity_credit_withdrawal_state_spec.rs index 43457371d21..599215919c6 100644 --- a/packages/rs-dpp/src/tests/identity/state_transition/identity_credit_withdrawal_transition/validation/state/validate_identity_credit_withdrawal_state_spec.rs +++ b/packages/rs-dpp/src/tests/identity/state_transition/identity_credit_withdrawal_transition/validation/state/validate_identity_credit_withdrawal_state_spec.rs @@ -109,7 +109,7 @@ mod validate_identity_credit_withdrawal_transition_state_factory { .await; match result { - Ok(_) => assert!(false, "should not return Ok result"), + Ok(_) => panic!("should not return Ok result"), Err(e) => assert_eq!(e.to_string(), "Some error"), } } @@ -130,13 +130,15 @@ mod validate_identity_credit_withdrawal_transition_state_factory { anyhow::Ok(Some(identity)) }); - let (state_transition, validator) = setup_test(state_repository, Some(5)); + let (mut state_transition, validator) = setup_test(state_repository, Some(5)); + + state_transition.revision = 1; let result = validator .validate_identity_credit_withdrawal_transition_state(&state_transition) .await .unwrap(); - assert_eq!(result.is_valid(), true); + assert!(result.is_valid()); } } diff --git a/packages/rs-dpp/src/tests/mod.rs b/packages/rs-dpp/src/tests/mod.rs index 9811d845083..1089b693b75 100644 --- a/packages/rs-dpp/src/tests/mod.rs +++ b/packages/rs-dpp/src/tests/mod.rs @@ -1,9 +1,15 @@ pub mod fixtures; + +#[cfg(test)] mod identifier_spec; pub mod utils; +#[cfg(test)] mod data_contract; +#[cfg(test)] mod document; +#[cfg(test)] mod identity; +#[cfg(test)] mod version; diff --git a/packages/rs-dpp/src/util/cbor_value/cbor_map.rs b/packages/rs-dpp/src/util/cbor_value/cbor_map.rs index 77f779ea024..29e8d54eab8 
100644 --- a/packages/rs-dpp/src/util/cbor_value/cbor_map.rs +++ b/packages/rs-dpp/src/util/cbor_value/cbor_map.rs @@ -17,6 +17,11 @@ pub trait CborBTreeMapHelper { fn get_optional_integer>(&self, key: &str) -> Result, ProtocolError>; fn get_integer>(&self, key: &str) -> Result; + fn remove_optional_integer>( + &mut self, + key: &str, + ) -> Result, ProtocolError>; + fn remove_integer>(&mut self, key: &str) -> Result; fn get_optional_bool(&self, key: &str) -> Result, ProtocolError>; fn get_bool(&self, key: &str) -> Result; fn get_optional_inner_value_array<'a, I: FromIterator<&'a CborValue>>( @@ -67,8 +72,9 @@ where } fn get_identifier(&self, key: &str) -> Result<[u8; 32], ProtocolError> { - self.get_optional_identifier(key)? - .ok_or_else(|| ProtocolError::DecodingError(format!("unable to get property {key}"))) + self.get_optional_identifier(key)?.ok_or_else(|| { + ProtocolError::DecodingError(format!("unable to get identifier property {key}")) + }) } fn get_optional_string(&self, key: &str) -> Result, ProtocolError> { @@ -83,8 +89,9 @@ where } fn get_string(&self, key: &str) -> Result { - self.get_optional_string(key)? - .ok_or_else(|| ProtocolError::DecodingError(format!("unable to get property {key}"))) + self.get_optional_string(key)?.ok_or_else(|| { + ProtocolError::DecodingError(format!("unable to get string property {key}")) + }) } fn get_optional_str(&self, key: &str) -> Result, ProtocolError> { @@ -98,8 +105,9 @@ where } fn get_str(&self, key: &str) -> Result<&str, ProtocolError> { - self.get_optional_str(key)? - .ok_or_else(|| ProtocolError::DecodingError(format!("unable to get property {key}"))) + self.get_optional_str(key)?.ok_or_else(|| { + ProtocolError::DecodingError(format!("unable to get str property {key}")) + }) } fn get_optional_integer>( @@ -120,8 +128,9 @@ where } fn get_integer>(&self, key: &str) -> Result { - self.get_optional_integer(key)? 
- .ok_or_else(|| ProtocolError::DecodingError(format!("unable to get property {key}"))) + self.get_optional_integer(key)?.ok_or_else(|| { + ProtocolError::DecodingError(format!("unable to get integer property {key}")) + }) } fn get_optional_bool(&self, key: &str) -> Result, ProtocolError> { @@ -135,8 +144,32 @@ where } fn get_bool(&self, key: &str) -> Result { - self.get_optional_bool(key)? - .ok_or_else(|| ProtocolError::DecodingError(format!("unable to get property {key}"))) + self.get_optional_bool(key)?.ok_or_else(|| { + ProtocolError::DecodingError(format!("unable to get bool property {key}")) + }) + } + + fn remove_optional_integer>( + &mut self, + key: &str, + ) -> Result, ProtocolError> { + self.remove(key) + .map(|v| { + i128::from(v.borrow().as_integer().ok_or_else(|| { + ProtocolError::DecodingError(format!("{key} must be an integer")) + })?) + .try_into() + .map_err(|_| { + ProtocolError::DecodingError(format!("{key} is out of required bounds")) + }) + }) + .transpose() + } + + fn remove_integer>(&mut self, key: &str) -> Result { + self.remove_optional_integer(key)?.ok_or_else(|| { + ProtocolError::DecodingError(format!("unable to remove integer property {key}")) + }) } fn get_optional_inner_value_array<'a, I: FromIterator<&'a CborValue>>( @@ -157,8 +190,9 @@ where &'a self, key: &str, ) -> Result { - self.get_optional_inner_value_array(key)? - .ok_or_else(|| ProtocolError::DecodingError(format!("unable to get property {key}"))) + self.get_optional_inner_value_array(key)?.ok_or_else(|| { + ProtocolError::DecodingError(format!("unable to get inner value array property {key}")) + }) } fn get_optional_inner_string_array>( @@ -190,8 +224,9 @@ where &self, key: &str, ) -> Result { - self.get_optional_inner_string_array(key)? 
- .ok_or_else(|| ProtocolError::DecodingError(format!("unable to get property {key}"))) + self.get_optional_inner_string_array(key)?.ok_or_else(|| { + ProtocolError::DecodingError(format!("unable to get inner string property {key}")) + }) } fn get_optional_inner_borrowed_str_value_map< @@ -240,6 +275,10 @@ where key: &str, ) -> Result { self.get_optional_inner_borrowed_str_value_map(key)? - .ok_or_else(|| ProtocolError::DecodingError(format!("unable to get property {key}"))) + .ok_or_else(|| { + ProtocolError::DecodingError(format!( + "unable to get borrowed str value map property {key}" + )) + }) } } diff --git a/packages/rs-dpp/src/util/is_fibonacci_number.rs b/packages/rs-dpp/src/util/is_fibonacci_number.rs index 6f8a554e15c..fab42b93875 100644 --- a/packages/rs-dpp/src/util/is_fibonacci_number.rs +++ b/packages/rs-dpp/src/util/is_fibonacci_number.rs @@ -1,7 +1,7 @@ -fn is_perfect_square(number: u64) -> bool { +fn is_perfect_square(number: u32) -> bool { (number as f64).sqrt().fract() == 0.0 } -pub fn is_fibonacci_number(number: u64) -> bool { +pub fn is_fibonacci_number(number: u32) -> bool { is_perfect_square(5 * number * number + 4) || is_perfect_square(5 * number * number - 4) } diff --git a/packages/rs-dpp/src/util/json_value/mod.rs b/packages/rs-dpp/src/util/json_value/mod.rs index 30f9e425dd7..09827a12031 100644 --- a/packages/rs-dpp/src/util/json_value/mod.rs +++ b/packages/rs-dpp/src/util/json_value/mod.rs @@ -44,6 +44,8 @@ pub trait JsonValueExt { fn get_string(&self, property_name: &str) -> Result<&str, anyhow::Error>; fn get_i64(&self, property_name: &str) -> Result; fn get_f64(&self, property_name: &str) -> Result; + fn get_u8(&self, property_name: &str) -> Result; + fn get_u32(&self, property_name: &str) -> Result; fn get_u64(&self, property_name: &str) -> Result; fn get_bytes(&self, property_name: &str) -> Result, anyhow::Error>; /// returns the the mutable JsonValue from provided path. The path is dot-separated string. 
i.e `properties.id` @@ -171,6 +173,52 @@ impl JsonValueExt for JsonValue { ); } + fn get_u8(&self, property_name: &str) -> Result { + let property_value = self.get(property_name).ok_or_else(|| { + anyhow!( + "the property '{}' doesn't exist in '{:?}'", + property_name, + self + ) + })?; + + if let JsonValue::Number(s) = property_value { + return s + .as_u64() + .ok_or_else(|| anyhow!("unable convert {} to u64", s))? + .try_into() + .map_err(|e| anyhow!("unable convert {} to u8: {}", s, e)); + } + bail!( + "getting property '{}' failed: {:?} isn't a number", + property_name, + property_value + ); + } + + fn get_u32(&self, property_name: &str) -> Result { + let property_value = self.get(property_name).ok_or_else(|| { + anyhow!( + "the property '{}' doesn't exist in '{:?}'", + property_name, + self + ) + })?; + + if let JsonValue::Number(s) = property_value { + return s + .as_u64() + .ok_or_else(|| anyhow!("unable convert {} to u64", s))? + .try_into() + .map_err(|e| anyhow!("unable convert {} to u32: {}", s, e)); + } + bail!( + "getting property '{}' failed: {:?} isn't a number", + property_name, + property_value + ); + } + fn get_u64(&self, property_name: &str) -> Result { let property_value = self.get(property_name).ok_or_else(|| { anyhow!( diff --git a/packages/rs-drive-abci/Cargo.toml b/packages/rs-drive-abci/Cargo.toml index 6a6264ed62e..7dd2cf1d962 100644 --- a/packages/rs-drive-abci/Cargo.toml +++ b/packages/rs-drive-abci/Cargo.toml @@ -11,14 +11,23 @@ ciborium = { git="https://github.com/qrayven/ciborium", branch="feat-ser-null-as chrono = "0.4.20" serde = { version = "1.0.132", features = ["derive"] } serde_json = { version="1.0", features=["preserve_order"] } -drive = { path = "../rs-drive" } +drive = { path = "../rs-drive", features = ["fixtures-and-mocks"]} thiserror = "1.0.30" rand = "0.8.4" tempfile = "3.3.0" bs58 = "0.4.0" base64 = "0.13.0" hex = "0.4.3" -dashcore = { git="https://github.com/dashpay/rust-dashcore", features=["no-std", "secp-recovery", 
"rand", "signer"], default-features = false, branch="master" } +dashcore = { git="https://github.com/dashevo/rust-dashcore", features=["std", "secp-recovery", "rand", "signer", "use-serde"], default-features = false, rev = "51548a4a1b9eca7430f5f3caf94d9784886ff2e9" } +dashcore-rpc = { git="https://github.com/jawid-h/rust-dashcore-rpc", branch="fix/attempt-to-fix" } +dpp = { path = "../rs-dpp", features = ["fixtures-and-mocks"]} rust_decimal = "1.2.5" rust_decimal_macros = "1.25.0" +mockall= { version ="0.11", optional = true } +[dev-dependencies] +itertools = { version = "0.10.5" } + +[features] +default = ["fixtures-and-mocks"] +fixtures-and-mocks = ["mockall"] diff --git a/packages/rs-drive-abci/src/abci/handlers.rs b/packages/rs-drive-abci/src/abci/handlers.rs index 63ba6654249..73d957d64ab 100644 --- a/packages/rs-drive-abci/src/abci/handlers.rs +++ b/packages/rs-drive-abci/src/abci/handlers.rs @@ -38,7 +38,6 @@ use crate::abci::messages::{ }; use crate::block::{BlockExecutionContext, BlockStateInfo}; use crate::execution::fee_pools::epoch::EpochInfo; -use drive::fee::epoch::GENESIS_EPOCH_INDEX; use drive::grovedb::TransactionArg; use crate::error::execution::ExecutionError; @@ -136,12 +135,25 @@ impl TenderdashAbci for Platform { hpmn_count: request.total_hpmns, }; + // If last synced Core block height is not set instead of scanning + // number of blocks for asset unlock transactions scan only one + // on Core chain locked height by setting last_synced_core_height to the same value + let last_synced_core_height = if request.last_synced_core_height == 0 { + block_execution_context.block_info.core_chain_locked_height + } else { + request.last_synced_core_height + }; + self.block_execution_context .replace(Some(block_execution_context)); + self.update_broadcasted_withdrawal_transaction_statuses( + last_synced_core_height, + transaction, + )?; + let unsigned_withdrawal_transaction_bytes = self .fetch_and_prepare_unsigned_withdrawal_transactions( - 
request.block_height as u32, request.validator_set_quorum_hash, transaction, )?; @@ -168,6 +180,8 @@ impl TenderdashAbci for Platform { ), ))?; + self.pool_withdrawals_into_transactions_queue(transaction)?; + // Process fees let process_block_fees_outcome = self.process_block_fees( &block_execution_context.block_info, @@ -230,18 +244,32 @@ impl TenderdashAbci for Platform { mod tests { mod handlers { use crate::abci::handlers::TenderdashAbci; + use crate::config::PlatformConfig; + use crate::rpc::core::MockCoreRPCLike; use chrono::{Duration, Utc}; + use dashcore::hashes::hex::FromHex; + use dashcore::BlockHash; + use dpp::contracts::withdrawals_contract; + use dpp::data_contract::DriveContractExt; + use dpp::identity::state_transition::identity_credit_withdrawal_transition::Pooling; + use dpp::prelude::Identifier; + use dpp::system_data_contracts::{load_system_data_contract, SystemDataContract}; + use dpp::tests::fixtures::get_withdrawal_document_fixture; + use dpp::util::hash; use drive::common::helpers::identities::create_test_masternode_identities; - use drive::drive::batch::GroveDbOpBatch; + use drive::drive::block_info::BlockInfo; + use drive::drive::identity::withdrawals::WithdrawalTransactionIdAndBytes; use drive::fee::epoch::CreditsPerEpoch; + use drive::fee_pools::epochs::Epoch; + use drive::tests::helpers::setup::setup_document; use rust_decimal::prelude::ToPrimitive; + use serde_json::json; use std::cmp::Ordering; use std::ops::Div; use crate::abci::messages::{ AfterFinalizeBlockRequest, BlockBeginRequest, BlockEndRequest, BlockFees, }; - use crate::config::PlatformConfig; use crate::test::fixture::abci::static_init_chain_request; use crate::test::helpers::fee_pools::create_test_masternode_share_identities_and_documents; use crate::test::helpers::setup::setup_platform_raw; @@ -250,11 +278,13 @@ mod tests { #[test] fn test_abci_flow() { - let platform = setup_platform_raw(Some(PlatformConfig { - drive_config: Default::default(), + let mut platform = 
setup_platform_raw(Some(PlatformConfig { verify_sum_trees: false, ..Default::default() })); + + let mut core_rpc_mock = MockCoreRPCLike::new(); + let transaction = platform.drive.grove.start_transaction(); // init chain @@ -264,21 +294,63 @@ mod tests { .init_chain(init_chain_request, Some(&transaction)) .expect("should init chain"); + let data_contract = load_system_data_contract(SystemDataContract::Withdrawals) + .expect("to load system data contract"); + // Init withdrawal requests - let withdrawals = (0..16) + let withdrawals: Vec = (0..16) .map(|index: u64| (index.to_be_bytes().to_vec(), vec![index as u8; 32])) .collect(); - let mut batch = GroveDbOpBatch::new(); + let owner_id = Identifier::new([1u8; 32]); + + for (_, tx_bytes) in withdrawals.iter() { + let tx_id = hash::hash(tx_bytes); + + let document = get_withdrawal_document_fixture( + &data_contract, + owner_id, + json!({ + "amount": 1000, + "coreFeePerByte": 1, + "pooling": Pooling::Never, + "outputScript": (0..23).collect::>(), + "status": withdrawals_contract::WithdrawalStatus::POOLED, + "transactionIndex": 1, + "transactionSignHeight": 93, + "transactionId": tx_id, + }), + ); + + let document_type = data_contract + .document_type_for_name(withdrawals_contract::document_types::WITHDRAWAL) + .expect("expected to get document type"); + + setup_document( + &platform.drive, + &document, + &data_contract, + document_type, + Some(&transaction), + ); + } + + let block_info = BlockInfo { + time_ms: 1, + height: 1, + epoch: Epoch::new(1), + }; + + let mut drive_operations = vec![]; platform .drive - .add_enqueue_withdrawal_transaction_operations(&mut batch, withdrawals); + .add_enqueue_withdrawal_transaction_operations(&withdrawals, &mut drive_operations); platform .drive - .grove_apply_batch(batch, true, Some(&transaction)) - .expect("to apply batch"); + .apply_drive_operations(drive_operations, true, &block_info, Some(&transaction)) + .expect("to apply drive operations"); // setup the contract let contract = 
platform.create_mn_shares_contract(Some(&transaction)); @@ -317,6 +389,23 @@ mod tests { let mut previous_block_time_ms: Option = None; + core_rpc_mock + .expect_get_block_hash() + // .times(total_days) + .returning(|_| { + Ok(BlockHash::from_hex( + "0000000000000000000000000000000000000000000000000000000000000000", + ) + .unwrap()) + }); + + core_rpc_mock + .expect_get_block_json() + // .times(total_days) + .returning(|_| Ok(json!({}))); + + platform.core_rpc = Box::new(core_rpc_mock); + // process blocks for day in 0..total_days { for block_num in 0..blocks_per_day { @@ -345,6 +434,8 @@ mod tests { .unwrap(), proposed_app_version: 1, validator_set_quorum_hash: Default::default(), + last_synced_core_height: 1, + core_chain_locked_height: 1, total_hpmns: proposers_count as u32, }; @@ -469,11 +560,30 @@ mod tests { fn test_chain_halt_for_36_days() { // TODO refactor to remove code duplication - let platform = setup_platform_raw(Some(PlatformConfig { - drive_config: Default::default(), + let mut platform = setup_platform_raw(Some(PlatformConfig { verify_sum_trees: false, ..Default::default() })); + + let mut core_rpc_mock = MockCoreRPCLike::new(); + + core_rpc_mock + .expect_get_block_hash() + // .times(1) // TODO: investigate why it always n + 1 + .returning(|_| { + Ok(BlockHash::from_hex( + "0000000000000000000000000000000000000000000000000000000000000000", + ) + .unwrap()) + }); + + core_rpc_mock + .expect_get_block_json() + // .times(1) // TODO: investigate why it always n + 1 + .returning(|_| Ok(json!({}))); + + platform.core_rpc = Box::new(core_rpc_mock); + let transaction = platform.drive.grove.start_transaction(); // init chain @@ -544,6 +654,8 @@ mod tests { .unwrap(), proposed_app_version: 1, validator_set_quorum_hash: Default::default(), + last_synced_core_height: 1, + core_chain_locked_height: 1, total_hpmns: proposers_count as u32, }; diff --git a/packages/rs-drive-abci/src/abci/messages.rs b/packages/rs-drive-abci/src/abci/messages.rs index 
686650b2b7a..26912da7cb5 100644 --- a/packages/rs-drive-abci/src/abci/messages.rs +++ b/packages/rs-drive-abci/src/abci/messages.rs @@ -101,6 +101,10 @@ pub struct BlockBeginRequest { pub proposed_app_version: ProtocolVersion, /// Validator set quorum hash pub validator_set_quorum_hash: [u8; 32], + /// Last synced core height + pub last_synced_core_height: u32, + /// Core chain locked height + pub core_chain_locked_height: u32, /// The total number of HPMNs in the system pub total_hpmns: u32, } diff --git a/packages/rs-drive-abci/src/block.rs b/packages/rs-drive-abci/src/block.rs index 2b76848d061..92b5ca09888 100644 --- a/packages/rs-drive-abci/src/block.rs +++ b/packages/rs-drive-abci/src/block.rs @@ -40,6 +40,8 @@ pub struct BlockStateInfo { pub previous_block_time_ms: Option, /// Block proposer's proTxHash pub proposer_pro_tx_hash: [u8; 32], + /// Core chain locked height + pub core_chain_locked_height: u32, } impl BlockStateInfo { @@ -50,6 +52,7 @@ impl BlockStateInfo { block_time_ms: block_begin_request.block_time_ms, previous_block_time_ms: block_begin_request.previous_block_time_ms, proposer_pro_tx_hash: block_begin_request.proposer_pro_tx_hash, + core_chain_locked_height: block_begin_request.core_chain_locked_height, } } } diff --git a/packages/rs-drive-abci/src/config.rs b/packages/rs-drive-abci/src/config.rs index a09dea5635d..541d6f92b34 100644 --- a/packages/rs-drive-abci/src/config.rs +++ b/packages/rs-drive-abci/src/config.rs @@ -28,11 +28,34 @@ use drive::drive::config::DriveConfig; +/// Configuration for Dash Core RPC client +#[derive(Clone, Debug)] +pub struct CoreRpcConfig { + /// Core RPC client url + pub url: String, + + /// Core RPC client username + pub username: String, + + /// Core RPC client password + pub password: String, +} + +/// Configuration for Dash Core related things +#[derive(Clone, Debug)] +pub struct CoreConfig { + /// Core RPC config + pub rpc: CoreRpcConfig, +} + +/// Platform configuration #[derive(Clone, Debug)] -/// 
Platform configuration struct pub struct PlatformConfig { - /// The underlying drive configuration - pub drive_config: DriveConfig, + /// Drive configuration + pub drive: Option, + + /// Dash Core config + pub core: CoreConfig, /// Should we verify sum trees? Useful to set as no for tests pub verify_sum_trees: bool, @@ -46,11 +69,18 @@ pub struct PlatformConfig { impl Default for PlatformConfig { fn default() -> Self { - PlatformConfig { + Self { verify_sum_trees: true, quorum_size: 100, validator_set_quorum_rotation_block_count: 15, - drive_config: Default::default(), + drive: Default::default(), + core: CoreConfig { + rpc: CoreRpcConfig { + url: "127.0.0.1".to_owned(), + username: "".to_owned(), + password: "".to_owned(), + }, + }, } } } diff --git a/packages/rs-drive-abci/src/execution/engine.rs b/packages/rs-drive-abci/src/execution/engine.rs index 9bf11f48b2a..941a60d57fb 100644 --- a/packages/rs-drive-abci/src/execution/engine.rs +++ b/packages/rs-drive-abci/src/execution/engine.rs @@ -2,20 +2,16 @@ use crate::abci::handlers::TenderdashAbci; use crate::abci::messages::{ AfterFinalizeBlockRequest, BlockBeginRequest, BlockEndRequest, BlockFees, }; -use crate::constants::PROTOCOL_VERSION_UPGRADE_PERCENTAGE_NEEDED; use crate::error::execution::ExecutionError; use crate::error::Error; use crate::platform::Platform; -use crate::state::PlatformState; use drive::dpp::identity::PartialIdentity; use drive::dpp::util::deserializer::ProtocolVersion; use drive::drive::batch::DriveOperationType; use drive::drive::block_info::BlockInfo; use drive::error::Error::GroveDB; use drive::fee::result::FeeResult; -use drive::fee_pools::epochs::Epoch; use drive::grovedb::Transaction; -use drive::query::TransactionArg; /// An execution event pub enum ExecutionEvent<'a> { @@ -168,12 +164,14 @@ impl Platform { proposer_pro_tx_hash: proposer, proposed_app_version: proposed_version, validator_set_quorum_hash: Default::default(), + last_synced_core_height: 1, + core_chain_locked_height: 1, 
total_hpmns, }; // println!("Block #{}", block_info.height); - let block_begin_response = self + let _block_begin_response = self .block_begin(block_begin_request, Some(&transaction)) .unwrap_or_else(|e| { panic!( diff --git a/packages/rs-drive-abci/src/execution/fee_pools/process_block_fees.rs b/packages/rs-drive-abci/src/execution/fee_pools/process_block_fees.rs index a3199019efc..ddd80da695e 100644 --- a/packages/rs-drive-abci/src/execution/fee_pools/process_block_fees.rs +++ b/packages/rs-drive-abci/src/execution/fee_pools/process_block_fees.rs @@ -310,6 +310,7 @@ mod tests { block_time_ms, previous_block_time_ms, proposer_pro_tx_hash, + core_chain_locked_height: 1, }; let epoch_info = @@ -479,6 +480,7 @@ mod tests { block_time_ms, previous_block_time_ms, proposer_pro_tx_hash, + core_chain_locked_height: 1, }; let epoch_info = @@ -556,7 +558,6 @@ mod tests { // We are not adding to the overall platform credits so we can't verify // the sum trees let platform = setup_platform_with_initial_state_structure(Some(PlatformConfig { - drive_config: Default::default(), verify_sum_trees: false, ..Default::default() })); diff --git a/packages/rs-drive-abci/src/identity_credit_withdrawal/mod.rs b/packages/rs-drive-abci/src/identity_credit_withdrawal/mod.rs index 0cd3ca43228..22a1dc31dab 100644 --- a/packages/rs-drive-abci/src/identity_credit_withdrawal/mod.rs +++ b/packages/rs-drive-abci/src/identity_credit_withdrawal/mod.rs @@ -1,8 +1,27 @@ +use std::{collections::HashMap, ops::Deref}; + use dashcore::{ - blockdata::transaction::special_transaction::asset_unlock::request_info::AssetUnlockRequestInfo, - hashes::Hash, QuorumHash, + blockdata::transaction::special_transaction::asset_unlock::{ + request_info::AssetUnlockRequestInfo, + unqualified_asset_unlock::{AssetUnlockBasePayload, AssetUnlockBaseTransactionInfo}, + }, + consensus::Encodable, + hashes::Hash, + QuorumHash, Script, TxOut, +}; +use drive::dpp::contracts::withdrawals_contract; +use 
drive::dpp::data_contract::DriveContractExt; +use drive::dpp::document::document_stub::DocumentStub; +use drive::dpp::identifier::Identifier; +use drive::dpp::identity::convert_credits_to_satoshi; +use drive::dpp::util::hash; +use drive::drive::identity::withdrawals::WithdrawalTransactionIdAndBytes; +use drive::{ + drive::{batch::DriveOperationType, block_info::BlockInfo}, + fee_pools::epochs::Epoch, + query::TransactionArg, }; -use drive::query::TransactionArg; +use serde_json::Value as JsonValue; use crate::{ error::{execution::ExecutionError, Error}, @@ -10,42 +29,1033 @@ use crate::{ }; const WITHDRAWAL_TRANSACTIONS_QUERY_LIMIT: u16 = 16; +const NUMBER_OF_BLOCKS_BEFORE_EXPIRED: u32 = 48; impl Platform { + /// Update statuses for broadcasted withdrawals + pub fn update_broadcasted_withdrawal_transaction_statuses( + &self, + last_synced_core_height: u32, + transaction: TransactionArg, + ) -> Result<(), Error> { + // Retrieve block execution context + let block_execution_context = self.block_execution_context.borrow(); + let block_execution_context = block_execution_context.as_ref().ok_or(Error::Execution( + ExecutionError::CorruptedCodeExecution( + "block execution context must be set in block begin handler", + ), + ))?; + + let block_info = BlockInfo { + time_ms: block_execution_context.block_info.block_time_ms, + height: block_execution_context.block_info.block_height, + epoch: Epoch::new(block_execution_context.epoch_info.current_epoch_index), + }; + + let data_contract_id = &withdrawals_contract::CONTRACT_ID; + + let (_, Some(contract_fetch_info)) = self.drive.get_contract_with_fetch_info( + data_contract_id.to_buffer(), + None, + transaction, + )? 
else { + return Err(Error::Execution( + ExecutionError::CorruptedCodeExecution("can't fetch withdrawal data contract"), + )); + }; + + let core_transactions = self.fetch_core_block_transactions( + last_synced_core_height, + block_execution_context.block_info.core_chain_locked_height, + )?; + + let broadcasted_withdrawal_documents = self.drive.fetch_withdrawal_documents_by_status( + withdrawals_contract::WithdrawalStatus::BROADCASTED.into(), + transaction, + )?; + + let mut drive_operations: Vec = vec![]; + + // Collecting only documents that have been updated + let documents_to_update: Vec = broadcasted_withdrawal_documents + .into_iter() + .map(|mut document| { + let transaction_sign_height = document + .get_u32(withdrawals_contract::property_names::TRANSACTION_SIGN_HEIGHT) + .map_err(|_| { + Error::Execution(ExecutionError::CorruptedCodeExecution( + "Can't get transactionSignHeight from withdrawal document", + )) + })?; + + let transaction_id_bytes = document + .get_bytes(withdrawals_contract::property_names::TRANSACTION_ID) + .map_err(|_| { + Error::Execution(ExecutionError::CorruptedCodeExecution( + "Can't get transactionId from withdrawal document", + )) + })?; + + let transaction_index = document + .get_u64(withdrawals_contract::property_names::TRANSACTION_INDEX) + .map_err(|_| { + Error::Execution(ExecutionError::CorruptedCodeExecution( + "Can't get transactionIdex from withdrawal document", + )) + })?; + + let transaction_id = hex::encode(transaction_id_bytes); + + let block_height_difference = + block_execution_context.block_info.core_chain_locked_height + - transaction_sign_height; + + let status; + + if core_transactions.contains(&transaction_id) { + status = withdrawals_contract::WithdrawalStatus::COMPLETE; + } else if block_height_difference > NUMBER_OF_BLOCKS_BEFORE_EXPIRED { + status = withdrawals_contract::WithdrawalStatus::EXPIRED; + } else { + return Ok(None); + }; + + document.set_u8(withdrawals_contract::property_names::STATUS, status.into()); + 
+ document.set_i64( + withdrawals_contract::property_names::UPDATED_AT, + block_info.time_ms.try_into().map_err(|_| { + Error::Execution(ExecutionError::CorruptedCodeExecution( + "Can't convert u64 block time to i64 updated_at", + )) + })?, + ); + + document.increment_revision().map_err(Error::Protocol)?; + + if status == withdrawals_contract::WithdrawalStatus::EXPIRED { + self.drive.add_insert_expired_index_operation( + transaction_index, + &mut drive_operations, + ); + } + + Ok(Some(document)) + }) + .collect::>, Error>>()? + .into_iter() + .flatten() + .collect(); + + self.drive.add_update_multiple_documents_operations( + &documents_to_update, + &contract_fetch_info.contract, + contract_fetch_info + .contract + .document_type_for_name(withdrawals_contract::document_types::WITHDRAWAL) + .map_err(|_| { + Error::Execution(ExecutionError::CorruptedCodeExecution( + "Can't fetch withdrawal data contract", + )) + })?, + &mut drive_operations, + ); + + self.drive + .apply_drive_operations(drive_operations, true, &block_info, transaction)?; + + Ok(()) + } + /// Prepares a list of an unsigned withdrawal transaction bytes pub fn fetch_and_prepare_unsigned_withdrawal_transactions( &self, - block_height: u32, validator_set_quorum_hash: [u8; 32], transaction: TransactionArg, ) -> Result>, Error> { + // Retrieve block execution context + let block_execution_context = self.block_execution_context.borrow(); + let block_execution_context = block_execution_context.as_ref().ok_or(Error::Execution( + ExecutionError::CorruptedCodeExecution( + "block execution context must be set in block begin handler", + ), + ))?; + + let block_info = BlockInfo { + time_ms: block_execution_context.block_info.block_time_ms, + height: block_execution_context.block_info.block_height, + epoch: Epoch::new(block_execution_context.epoch_info.current_epoch_index), + }; + + let data_contract_id = withdrawals_contract::CONTRACT_ID.deref(); + + let (_, Some(contract_fetch_info)) = 
self.drive.get_contract_with_fetch_info( + data_contract_id.to_buffer(), + None, + transaction, + )? else { + return Err(Error::Execution( + ExecutionError::CorruptedCodeExecution("can't fetch withdrawal data contract"), + )); + }; + + let mut drive_operations: Vec = vec![]; + // Get 16 latest withdrawal transactions from the queue - let withdrawal_transactions = self - .drive - .dequeue_withdrawal_transactions(WITHDRAWAL_TRANSACTIONS_QUERY_LIMIT, transaction)?; + let untied_withdrawal_transactions = self.drive.dequeue_withdrawal_transactions( + WITHDRAWAL_TRANSACTIONS_QUERY_LIMIT, + transaction, + &mut drive_operations, + )?; - // Appending request_height and quorum_hash to withdrwal transaction + if untied_withdrawal_transactions.is_empty() { + return Ok(Vec::new()); + } + + // Appending request_height and quorum_hash to withdrawal transaction // and pass it to JS Drive for singing and broadcasting - withdrawal_transactions - .into_iter() - .map(|(_, bytes)| { - let request_info = AssetUnlockRequestInfo { - request_height: block_height, - quorum_hash: QuorumHash::hash(&validator_set_quorum_hash), - }; + let (unsigned_withdrawal_transactions, documents_to_update): (Vec<_>, Vec<_>) = + untied_withdrawal_transactions + .into_iter() + .map(|(_, untied_transaction_bytes)| { + let request_info = AssetUnlockRequestInfo { + request_height: block_execution_context.block_info.core_chain_locked_height, + quorum_hash: QuorumHash::hash(&validator_set_quorum_hash), + }; - let mut bytes_buffer = vec![]; + let mut unsigned_transaction_bytes = vec![]; - request_info - .consensus_append_to_base_encode(bytes, &mut bytes_buffer) - .map_err(|_| { + request_info + .consensus_append_to_base_encode( + untied_transaction_bytes.clone(), + &mut unsigned_transaction_bytes, + ) + .map_err(|_| { + Error::Execution(ExecutionError::CorruptedCodeExecution( + "could not add additional request info to asset unlock transaction", + )) + })?; + + let original_transaction_id = 
hash::hash(untied_transaction_bytes); + let update_transaction_id = hash::hash(unsigned_transaction_bytes.clone()); + + let mut document = self.drive.find_withdrawal_document_by_transaction_id( + &original_transaction_id, + transaction, + )?; + + document.set_bytes( + withdrawals_contract::property_names::TRANSACTION_ID, + update_transaction_id, + ); + + document.set_i64( + withdrawals_contract::property_names::UPDATED_AT, + block_info.time_ms.try_into().map_err(|_| { + Error::Execution(ExecutionError::CorruptedCodeExecution( + "Can't convert u64 block time to i64 updated_at", + )) + })?, + ); + + document.increment_revision().map_err(|_| { Error::Execution(ExecutionError::CorruptedCodeExecution( - "could not add aditional request info to asset unlock transaction", + "Could not increment document revision", )) })?; - Ok(bytes_buffer) - }) - .collect::>, Error>>() + Ok((unsigned_transaction_bytes, document)) + }) + .collect::, DocumentStub)>, Error>>()? + .into_iter() + .unzip(); + + self.drive.add_update_multiple_documents_operations( + &documents_to_update, + &contract_fetch_info.contract, + contract_fetch_info + .contract + .document_type_for_name(withdrawals_contract::document_types::WITHDRAWAL) + .map_err(|_| { + Error::Execution(ExecutionError::CorruptedCodeExecution( + "could not get document type", + )) + })?, + &mut drive_operations, + ); + + self.drive + .apply_drive_operations(drive_operations, true, &block_info, transaction)?; + + Ok(unsigned_withdrawal_transactions) + } + + /// Pool withdrawal documents into transactions + pub fn pool_withdrawals_into_transactions_queue( + &self, + transaction: TransactionArg, + ) -> Result<(), Error> { + // Retrieve block execution context + let block_execution_context = self.block_execution_context.borrow(); + let block_execution_context = block_execution_context.as_ref().ok_or(Error::Execution( + ExecutionError::CorruptedCodeExecution( + "block execution context must be set in block begin handler", + ), + ))?; + + 
let block_info = BlockInfo { + time_ms: block_execution_context.block_info.block_time_ms, + height: block_execution_context.block_info.block_height, + epoch: Epoch::new(block_execution_context.epoch_info.current_epoch_index), + }; + + let data_contract_id = withdrawals_contract::CONTRACT_ID.deref(); + + let (_, Some(contract_fetch_info)) = self.drive.get_contract_with_fetch_info( + data_contract_id.to_buffer(), + None, + transaction, + )? else { + return Err(Error::Execution( + ExecutionError::CorruptedCodeExecution("can't fetch withdrawal data contract"), + )); + }; + + let mut documents = self.drive.fetch_withdrawal_documents_by_status( + withdrawals_contract::WithdrawalStatus::QUEUED.into(), + transaction, + )?; + + if documents.is_empty() { + return Ok(()); + } + + let mut drive_operations = vec![]; + + let withdrawal_transactions = self.build_withdrawal_transactions_from_documents( + &documents, + &mut drive_operations, + transaction, + )?; + + for document in documents.iter_mut() { + let document_id = Identifier::from_bytes(&document.id)?; + + let Some((_, transaction_bytes)) = withdrawal_transactions.get(&document_id) else { + return Err(Error::Execution(ExecutionError::CorruptedCodeExecution("transactions must contain a transaction"))) + }; + + let transaction_id = hash::hash(transaction_bytes); + + document.set_bytes( + withdrawals_contract::property_names::TRANSACTION_ID, + transaction_id.clone(), + ); + + document.set_u8( + withdrawals_contract::property_names::STATUS, + withdrawals_contract::WithdrawalStatus::POOLED as u8, + ); + + document.set_i64( + withdrawals_contract::property_names::UPDATED_AT, + block_info.time_ms.try_into().map_err(|_| { + Error::Execution(ExecutionError::CorruptedCodeExecution( + "Can't convert u64 block time to i64 updated_at", + )) + })?, + ); + + document.increment_revision().map_err(|_| { + Error::Execution(ExecutionError::CorruptedCodeExecution( + "Could not increment document revision", + )) + })?; + } + + 
self.drive.add_update_multiple_documents_operations( + &documents, + &contract_fetch_info.contract, + contract_fetch_info + .contract + .document_type_for_name(withdrawals_contract::document_types::WITHDRAWAL) + .map_err(|_| { + Error::Execution(ExecutionError::CorruptedCodeExecution( + "Can't fetch withdrawal data contract", + )) + })?, + &mut drive_operations, + ); + + let withdrawal_transactions: Vec = withdrawal_transactions + .values() + .into_iter() + .cloned() + .collect(); + + self.drive.add_enqueue_withdrawal_transaction_operations( + &withdrawal_transactions, + &mut drive_operations, + ); + + self.drive + .apply_drive_operations(drive_operations, true, &block_info, transaction)?; + + Ok(()) + } + + /// Fetch Core transactions by range of Core heights + pub fn fetch_core_block_transactions( + &self, + last_synced_core_height: u32, + core_chain_locked_height: u32, + ) -> Result, Error> { + let mut tx_hashes: Vec = vec![]; + + for height in last_synced_core_height..=core_chain_locked_height { + let block_hash = self.core_rpc.get_block_hash(height).map_err(|_| { + Error::Execution(ExecutionError::CorruptedCodeExecution( + "could not get block by height", + )) + })?; + + let block_json: JsonValue = + self.core_rpc.get_block_json(&block_hash).map_err(|_| { + Error::Execution(ExecutionError::CorruptedCodeExecution( + "could not get block by hash", + )) + })?; + + if let Some(transactions) = block_json.get("tx") { + if let Some(transactions) = transactions.as_array() { + for transaction_hash in transactions { + tx_hashes.push( + transaction_hash + .as_str() + .ok_or(Error::Execution(ExecutionError::CorruptedCodeExecution( + "could not get transaction hash as string", + )))? 
+ .to_string(), + ); + } + } + } + } + + Ok(tx_hashes) + } + + /// Build list of Core transactions from withdrawal documents + pub fn build_withdrawal_transactions_from_documents( + &self, + documents: &[DocumentStub], + drive_operation_types: &mut Vec, + transaction: TransactionArg, + ) -> Result, Error> { + let mut withdrawals: HashMap = HashMap::new(); + + let latest_withdrawal_index = self + .drive + .fetch_and_remove_latest_withdrawal_transaction_index_operations( + drive_operation_types, + transaction, + )?; + + for (i, document) in documents.iter().enumerate() { + let output_script_bytes = document + .get_bytes(withdrawals_contract::property_names::OUTPUT_SCRIPT) + .map_err(|_| { + Error::Execution(ExecutionError::CorruptedCodeExecution( + "Can't get outputScript from withdrawal document", + )) + })?; + + let amount = document + .get_u64(withdrawals_contract::property_names::AMOUNT) + .map_err(|_| { + Error::Execution(ExecutionError::CorruptedCodeExecution( + "Can't get amount from withdrawal document", + )) + })?; + + let core_fee_per_byte = document + .get_u64(withdrawals_contract::property_names::CORE_FEE_PER_BYTE) + .map_err(|_| { + Error::Execution(ExecutionError::CorruptedCodeExecution( + "Can't get coreFeePerByte from withdrawal document", + )) + })?; + + let state_transition_size = 190; + + let output_script: Script = Script::from(output_script_bytes); + + let tx_out = TxOut { + value: convert_credits_to_satoshi(amount), + script_pubkey: output_script, + }; + + let transaction_index = latest_withdrawal_index + i as u64; + + let withdrawal_transaction = AssetUnlockBaseTransactionInfo { + version: 1, + lock_time: 0, + output: vec![tx_out], + base_payload: AssetUnlockBasePayload { + version: 1, + index: transaction_index, + fee: (state_transition_size * core_fee_per_byte * 1000) as u32, + }, + }; + + let mut transaction_buffer: Vec = vec![]; + + withdrawal_transaction + .consensus_encode(&mut transaction_buffer) + .map_err(|_| { + 
Error::Execution(ExecutionError::CorruptedCodeExecution( + "Can't consensus encode a withdrawal transaction", + )) + })?; + + withdrawals.insert( + Identifier::from_bytes(&document.id)?, + ( + transaction_index.to_be_bytes().to_vec(), + transaction_buffer.clone(), + ), + ); + } + + Ok(withdrawals) + } +} + +#[cfg(test)] +mod tests { + use dashcore::{ + hashes::hex::{FromHex, ToHex}, + BlockHash, + }; + use dpp::{contracts::withdrawals_contract, tests::fixtures::get_withdrawal_document_fixture}; + use drive::tests::helpers::setup::setup_document; + use serde_json::json; + + use dpp::identity::state_transition::identity_credit_withdrawal_transition::Pooling; + + use crate::{ + block::BlockExecutionContext, execution::fee_pools::epoch::EpochInfo, + rpc::core::MockCoreRPCLike, + }; + + mod update_withdrawal_statuses { + use std::cell::RefCell; + + use crate::block::BlockStateInfo; + use crate::test::helpers::setup::setup_platform_with_initial_state_structure; + use dpp::{ + data_contract::{DataContract, DriveContractExt}, + prelude::Identifier, + system_data_contracts::{load_system_data_contract, SystemDataContract}, + }; + use drive::tests::helpers::setup::setup_system_data_contract; + + use super::*; + + #[test] + fn test_statuses_are_updated() { + let mut platform = setup_platform_with_initial_state_structure(None); + + let mut mock_rpc_client = MockCoreRPCLike::new(); + + mock_rpc_client + .expect_get_block_hash() + .withf(|height| *height == 95) + .returning(|_| { + Ok(BlockHash::from_hex( + "0000000000000000000000000000000000000000000000000000000000000000", + ) + .unwrap()) + }); + + mock_rpc_client + .expect_get_block_hash() + .withf(|height| *height == 96) + .returning(|_| { + Ok(BlockHash::from_hex( + "1111111111111111111111111111111111111111111111111111111111111111", + ) + .unwrap()) + }); + + mock_rpc_client + .expect_get_block_json() + .withf(|bh| { + bh.to_hex() + == "0000000000000000000000000000000000000000000000000000000000000000" + }) + .returning(|_| { 
+ Ok(json!({ + "tx": ["0101010101010101010101010101010101010101010101010101010101010101"] + })) + }); + + mock_rpc_client + .expect_get_block_json() + .withf(|bh| { + bh.to_hex() + == "1111111111111111111111111111111111111111111111111111111111111111" + }) + .returning(|_| { + Ok(json!({ + "tx": ["0202020202020202020202020202020202020202020202020202020202020202"] + })) + }); + + platform.core_rpc = Box::new(mock_rpc_client); + + let transaction = platform.drive.grove.start_transaction(); + + let data_contract = load_system_data_contract(SystemDataContract::Withdrawals) + .expect("to load system data contract"); + + // TODO: figure out the bug in data contract factory + let data_contract = DataContract::from_cbor( + data_contract + .to_cbor() + .expect("to convert contract to CBOR"), + ) + .expect("to create data contract from CBOR"); + + setup_system_data_contract(&platform.drive, &data_contract, Some(&transaction)); + + let owner_id = Identifier::new([1u8; 32]); + + let document_1 = get_withdrawal_document_fixture( + &data_contract, + owner_id, + json!({ + "amount": 1000, + "coreFeePerByte": 1, + "pooling": Pooling::Never, + "outputScript": (0..23).collect::>(), + "status": withdrawals_contract::WithdrawalStatus::BROADCASTED, + "transactionIndex": 1, + "transactionSignHeight": 93, + "transactionId": vec![1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], + }), + ); + + let document_type = data_contract + .document_type_for_name(withdrawals_contract::document_types::WITHDRAWAL) + .expect("expected to get document type"); + + setup_document( + &platform.drive, + &document_1, + &data_contract, + document_type, + Some(&transaction), + ); + + let document_2 = get_withdrawal_document_fixture( + &data_contract, + owner_id, + json!({ + "amount": 1000, + "coreFeePerByte": 1, + "pooling": Pooling::Never, + "outputScript": (0..23).collect::>(), + "status": withdrawals_contract::WithdrawalStatus::BROADCASTED, + "transactionIndex": 
2, + "transactionSignHeight": 10, + "transactionId": vec![3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], + }), + ); + + setup_document( + &platform.drive, + &document_2, + &data_contract, + document_type, + Some(&transaction), + ); + + platform.block_execution_context = RefCell::new(Some(BlockExecutionContext { + block_info: BlockStateInfo { + block_height: 1, + block_time_ms: 1, + previous_block_time_ms: Some(1), + proposer_pro_tx_hash: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, + ], + core_chain_locked_height: 96, + }, + epoch_info: EpochInfo { + current_epoch_index: 1, + previous_epoch_index: None, + is_epoch_change: false, + }, + hpmn_count: 100, + })); + + platform + .update_broadcasted_withdrawal_transaction_statuses(95, Some(&transaction)) + .expect("to update withdrawal statuses"); + + let documents = platform + .drive + .fetch_withdrawal_documents_by_status( + withdrawals_contract::WithdrawalStatus::EXPIRED.into(), + Some(&transaction), + ) + .expect("to fetch documents by status"); + + assert_eq!(documents.len(), 1); + assert_eq!( + documents.get(0).unwrap().id.to_vec(), + document_2.id.to_vec() + ); + + let documents = platform + .drive + .fetch_withdrawal_documents_by_status( + withdrawals_contract::WithdrawalStatus::COMPLETE.into(), + Some(&transaction), + ) + .expect("to fetch documents by status"); + + assert_eq!(documents.len(), 1); + assert_eq!( + documents.get(0).unwrap().id.to_vec(), + document_1.id.to_vec() + ); + } + } + + mod pool_withdrawals_into_transactions { + use std::cell::RefCell; + + use dpp::data_contract::DriveContractExt; + use dpp::identity::state_transition::identity_credit_withdrawal_transition::Pooling; + use dpp::prelude::Identifier; + use dpp::system_data_contracts::{load_system_data_contract, SystemDataContract}; + use drive::dpp::contracts::withdrawals_contract; + use 
drive::tests::helpers::setup::setup_system_data_contract; + + use crate::block::BlockStateInfo; + use crate::test::helpers::setup::setup_platform_with_initial_state_structure; + + use super::*; + + #[test] + fn test_pooling() { + let mut platform = setup_platform_with_initial_state_structure(None); + + let transaction = platform.drive.grove.start_transaction(); + + let data_contract = load_system_data_contract(SystemDataContract::Withdrawals) + .expect("to load system data contract"); + + setup_system_data_contract(&platform.drive, &data_contract, Some(&transaction)); + + let owner_id = Identifier::new([1u8; 32]); + + let document_1 = get_withdrawal_document_fixture( + &data_contract, + owner_id, + json!({ + "amount": 1000, + "coreFeePerByte": 1, + "pooling": Pooling::Never, + "outputScript": (0..23).collect::>(), + "status": withdrawals_contract::WithdrawalStatus::QUEUED, + "transactionIndex": 1, + }), + ); + + let document_type = data_contract + .document_type_for_name(withdrawals_contract::document_types::WITHDRAWAL) + .expect("expected to get document type"); + + setup_document( + &platform.drive, + &document_1, + &data_contract, + document_type, + Some(&transaction), + ); + + let document_2 = get_withdrawal_document_fixture( + &data_contract, + owner_id, + json!({ + "amount": 1000, + "coreFeePerByte": 1, + "pooling": Pooling::Never, + "outputScript": (0..23).collect::>(), + "status": withdrawals_contract::WithdrawalStatus::QUEUED, + "transactionIndex": 2, + }), + ); + + setup_document( + &platform.drive, + &document_2, + &data_contract, + document_type, + Some(&transaction), + ); + + platform.block_execution_context = RefCell::new(Some(BlockExecutionContext { + block_info: BlockStateInfo { + block_height: 1, + block_time_ms: 1, + previous_block_time_ms: Some(1), + proposer_pro_tx_hash: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, + ], + core_chain_locked_height: 96, + }, + epoch_info: EpochInfo { + 
current_epoch_index: 1, + previous_epoch_index: None, + is_epoch_change: false, + }, + hpmn_count: 100, + })); + + platform + .pool_withdrawals_into_transactions_queue(Some(&transaction)) + .expect("to pool withdrawal documents into transactions"); + + let updated_documents = platform + .drive + .fetch_withdrawal_documents_by_status( + withdrawals_contract::WithdrawalStatus::POOLED.into(), + Some(&transaction), + ) + .expect("to fetch withdrawal documents"); + + let tx_ids = [ + "4b74f91644215904ff1aa4122b204ba674aea74d99a17c03fbda483692bf735b", + "897ec16cb13d802ee6acdaf55274c59f3509a4929d726bab919a962ed4a8703c", + ]; + + for document in updated_documents { + assert_eq!(document.revision, 2); + + let tx_id: Vec = document + .get_bytes("transactionId") + .expect("to get transactionId"); + + let tx_id_hex = hex::encode(tx_id); + + assert!(tx_ids.contains(&tx_id_hex.as_str())); + } + } + } + + mod fetch_core_block_transactions { + use super::*; + use crate::test::helpers::setup::setup_platform_with_initial_state_structure; + + #[test] + fn test_fetches_core_transactions() { + let mut platform = setup_platform_with_initial_state_structure(None); + + let mut mock_rpc_client = MockCoreRPCLike::new(); + + mock_rpc_client + .expect_get_block_hash() + .withf(|height| *height == 1) + .returning(|_| { + Ok(BlockHash::from_hex( + "0000000000000000000000000000000000000000000000000000000000000000", + ) + .unwrap()) + }); + + mock_rpc_client + .expect_get_block_hash() + .withf(|height| *height == 2) + .returning(|_| { + Ok(BlockHash::from_hex( + "1111111111111111111111111111111111111111111111111111111111111111", + ) + .unwrap()) + }); + + mock_rpc_client + .expect_get_block_json() + .withf(|bh| { + bh.to_hex() + == "0000000000000000000000000000000000000000000000000000000000000000" + }) + .returning(|_| { + Ok(json!({ + "tx": ["1"] + })) + }); + + mock_rpc_client + .expect_get_block_json() + .withf(|bh| { + bh.to_hex() + == 
"1111111111111111111111111111111111111111111111111111111111111111" + }) + .returning(|_| { + Ok(json!({ + "tx": ["2"] + })) + }); + + platform.core_rpc = Box::new(mock_rpc_client); + + let transactions = platform + .fetch_core_block_transactions(1, 2) + .expect("to fetch core transactions"); + + assert_eq!(transactions.len(), 2); + assert_eq!(transactions, ["1", "2"]); + } + } + + mod build_withdrawal_transactions_from_documents { + use crate::test::helpers::setup::setup_platform_with_initial_state_structure; + use dpp::data_contract::DriveContractExt; + use dpp::prelude::Identifier; + use dpp::system_data_contracts::{load_system_data_contract, SystemDataContract}; + use dpp::{ + document::document_stub::DocumentStub, + identity::state_transition::identity_credit_withdrawal_transition::Pooling, + }; + use drive::drive::block_info::BlockInfo; + use drive::drive::identity::withdrawals::WithdrawalTransactionIdAndBytes; + use drive::tests::helpers::setup::setup_system_data_contract; + use itertools::Itertools; + + use super::*; + + #[test] + fn test_build() { + let platform = setup_platform_with_initial_state_structure(None); + + let transaction = platform.drive.grove.start_transaction(); + + let data_contract = load_system_data_contract(SystemDataContract::Withdrawals) + .expect("to load system data contract"); + + setup_system_data_contract(&platform.drive, &data_contract, Some(&transaction)); + + let owner_id = Identifier::new([1u8; 32]); + + let document_1 = get_withdrawal_document_fixture( + &data_contract, + owner_id, + json!({ + "amount": 1000, + "coreFeePerByte": 1, + "pooling": Pooling::Never, + "outputScript": (0..23).collect::>(), + "status": withdrawals_contract::WithdrawalStatus::POOLED, + "transactionIndex": 1, + }), + ); + + let document_type = data_contract + .document_type_for_name(withdrawals_contract::document_types::WITHDRAWAL) + .expect("expected to get document type"); + + setup_document( + &platform.drive, + &document_1, + &data_contract, + 
document_type, + Some(&transaction), + ); + + let document_2 = get_withdrawal_document_fixture( + &data_contract, + owner_id, + json!({ + "amount": 1000, + "coreFeePerByte": 1, + "pooling": Pooling::Never, + "outputScript": (0..23).collect::>(), + "status": withdrawals_contract::WithdrawalStatus::POOLED, + "transactionIndex": 2, + }), + ); + + setup_document( + &platform.drive, + &document_2, + &data_contract, + document_type, + Some(&transaction), + ); + + let documents = vec![ + DocumentStub::from_cbor( + &document_1.to_buffer().expect("to convert document to cbor"), + None, + None, + ) + .expect("to create document from cbor"), + DocumentStub::from_cbor( + &document_2.to_buffer().expect("to convert document to cbor"), + None, + None, + ) + .expect("to create document from cbor"), + ]; + + let mut batch = vec![]; + + let transactions = platform + .build_withdrawal_transactions_from_documents( + &documents, + &mut batch, + Some(&transaction), + ) + .expect("to build transactions from documents"); + + platform + .drive + .apply_drive_operations(batch, true, &BlockInfo::default(), Some(&transaction)) + .expect("to apply drive op batch"); + + assert_eq!( + transactions + .values() + .cloned() + .sorted() + .collect::>(), + vec![ + ( + vec![0, 0, 0, 0, 0, 0, 0, 0], + vec![ + 1, 0, 9, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 23, 0, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 0, 0, 0, 0, + 1, 0, 0, 0, 0, 0, 0, 0, 0, 48, 230, 2, 0 + ], + ), + ( + vec![0, 0, 0, 0, 0, 0, 0, 1], + vec![ + 1, 0, 9, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 23, 0, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 0, 0, 0, 0, + 1, 1, 0, 0, 0, 0, 0, 0, 0, 48, 230, 2, 0 + ], + ), + ] + .into_iter() + .sorted() + .collect::>(), + ); + } } } diff --git a/packages/rs-drive-abci/src/lib.rs b/packages/rs-drive-abci/src/lib.rs index c7bf00d3db0..ffd140c4684 100644 --- a/packages/rs-drive-abci/src/lib.rs +++ b/packages/rs-drive-abci/src/lib.rs @@ -40,6 
+40,9 @@ pub mod state; /// Platform constants pub mod constants; +/// Anything related to 3rd party RPC +pub mod rpc; + // TODO We should compile it only for tests /// Test helpers and fixtures pub mod test; diff --git a/packages/rs-drive-abci/src/platform.rs b/packages/rs-drive-abci/src/platform.rs index 2b3cab8f6a8..da4a29fea67 100644 --- a/packages/rs-drive-abci/src/platform.rs +++ b/packages/rs-drive-abci/src/platform.rs @@ -32,15 +32,25 @@ use crate::block::BlockExecutionContext; use crate::config::PlatformConfig; +use crate::error::execution::ExecutionError; use crate::error::Error; +use crate::rpc::core::{CoreRPCLike, DefaultCoreRPC}; use crate::state::PlatformState; - use drive::drive::Drive; use drive::drive::defaults::PROTOCOL_VERSION; use std::cell::RefCell; use std::path::Path; +#[cfg(feature = "fixtures-and-mocks")] +use crate::rpc::core::MockCoreRPCLike; +#[cfg(feature = "fixtures-and-mocks")] +use dashcore::hashes::hex::FromHex; +#[cfg(feature = "fixtures-and-mocks")] +use dashcore::BlockHash; +#[cfg(feature = "fixtures-and-mocks")] +use serde_json::json; + /// Platform pub struct Platform { /// Drive @@ -51,14 +61,35 @@ pub struct Platform { pub config: PlatformConfig, /// Block execution context pub block_execution_context: RefCell>, + /// Core RPC Client + pub core_rpc: Box, +} + +impl std::fmt::Debug for Platform { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("Platform").finish() + } } impl Platform { /// Open Platform with Drive and block execution context. 
pub fn open>(path: P, config: Option) -> Result { let config = config.unwrap_or_default(); + let drive = Drive::open(path, config.drive.clone()).map_err(Error::Drive)?; - let drive = Drive::open(path, Some(config.drive_config.clone())).map_err(Error::Drive)?; + let core_rpc: Box = Box::new( + DefaultCoreRPC::open( + config.core.rpc.url.as_str(), + config.core.rpc.username.clone(), + config.core.rpc.password.clone(), + ) + .map_err(|e| { + dbg!(e); + Error::Execution(ExecutionError::CorruptedCodeExecution( + "Could not setup Dash Core RPC client", + )) + })?, + ); let current_protocol_version_in_consensus = drive .fetch_current_protocol_version(None) @@ -68,6 +99,7 @@ impl Platform { .fetch_next_protocol_version(None) .map_err(Error::Drive)? .unwrap_or(PROTOCOL_VERSION); + let state = PlatformState { last_block_info: None, current_protocol_version_in_consensus, @@ -79,6 +111,29 @@ impl Platform { state: RefCell::new(state), config, block_execution_context: RefCell::new(None), + core_rpc, }) } + + /// Helper function to be able + /// to quickly mock core rpc for tests + #[cfg(feature = "fixtures-and-mocks")] + pub fn mock_core_rpc_client(&mut self) { + let mut core_rpc_mock = MockCoreRPCLike::new(); + + core_rpc_mock.expect_get_block_hash().returning(|_| { + Ok(BlockHash::from_hex( + "0000000000000000000000000000000000000000000000000000000000000000", + ) + .unwrap()) + }); + + core_rpc_mock.expect_get_block_json().returning(|_| { + Ok(json!({ + "tx": [], + })) + }); + + self.core_rpc = Box::new(core_rpc_mock); + } } diff --git a/packages/rs-drive-abci/src/rpc/core.rs b/packages/rs-drive-abci/src/rpc/core.rs new file mode 100644 index 00000000000..5a33c415dd9 --- /dev/null +++ b/packages/rs-drive-abci/src/rpc/core.rs @@ -0,0 +1,46 @@ +use dashcore::{Block, BlockHash}; +use dashcore_rpc::{Auth, Client, Error, RpcApi}; +#[cfg(feature = "fixtures-and-mocks")] +use mockall::{automock, predicate::*}; +use serde_json::Value; + +/// Core RPC interface +#[cfg_attr(feature = 
"fixtures-and-mocks", automock)] +pub trait CoreRPCLike { + /// Get block hash by height + fn get_block_hash(&self, height: u32) -> Result; + + /// Get block by hash + fn get_block(&self, block_hash: &BlockHash) -> Result; + + /// Get block by hash in JSON format + fn get_block_json(&self, block_hash: &BlockHash) -> Result; +} + +/// Default implementation of Dash Core RPC using DashCoreRPC client +pub struct DefaultCoreRPC { + inner: Client, +} + +impl DefaultCoreRPC { + /// Create new instance + pub fn open(url: &str, username: String, password: String) -> Result { + Ok(DefaultCoreRPC { + inner: Client::new(url, Auth::UserPass(username, password))?, + }) + } +} + +impl CoreRPCLike for DefaultCoreRPC { + fn get_block_hash(&self, height: u32) -> Result { + self.inner.get_block_hash(height as u64) + } + + fn get_block(&self, block_hash: &BlockHash) -> Result { + self.inner.get_block(block_hash) + } + + fn get_block_json(&self, block_hash: &BlockHash) -> Result { + self.inner.get_block_json(block_hash) + } +} diff --git a/packages/rs-drive-abci/src/rpc/mod.rs b/packages/rs-drive-abci/src/rpc/mod.rs new file mode 100644 index 00000000000..6575232295f --- /dev/null +++ b/packages/rs-drive-abci/src/rpc/mod.rs @@ -0,0 +1,2 @@ +/// Dash Core RPC +pub mod core; diff --git a/packages/rs-drive-abci/src/state/genesis.rs b/packages/rs-drive-abci/src/state/genesis.rs index e940abc2757..46a47fd4dc9 100644 --- a/packages/rs-drive-abci/src/state/genesis.rs +++ b/packages/rs-drive-abci/src/state/genesis.rs @@ -269,6 +269,7 @@ impl Platform { id: DPNS_DASH_TLD_DOCUMENT_ID, properties: document_stub_properties, owner_id: contract.owner_id.to_buffer(), + revision: 1, }; let document_type = contract.document_type_for_name("domain")?; @@ -299,8 +300,6 @@ impl Platform { #[cfg(test)] mod tests { - use super::*; - mod create_genesis_state { use crate::test::helpers::setup::setup_platform_with_genesis_state; @@ -318,8 +317,8 @@ mod tests { assert_eq!( root_hash, [ - 223, 238, 172, 129, 
164, 189, 41, 188, 64, 164, 89, 92, 216, 66, 225, 17, 106, - 82, 196, 145, 103, 87, 239, 183, 226, 201, 79, 71, 248, 183, 191, 178, + 59, 16, 30, 145, 9, 47, 66, 85, 133, 88, 194, 109, 241, 15, 226, 214, 163, 196, + 146, 107, 122, 145, 111, 45, 251, 242, 250, 157, 153, 43, 219, 184 ] ) } diff --git a/packages/rs-drive-abci/src/test/helpers/fee_pools.rs b/packages/rs-drive-abci/src/test/helpers/fee_pools.rs index 6882da564b0..8ebbeac2f54 100644 --- a/packages/rs-drive-abci/src/test/helpers/fee_pools.rs +++ b/packages/rs-drive-abci/src/test/helpers/fee_pools.rs @@ -36,6 +36,7 @@ use std::borrow::Cow; use std::collections::BTreeMap; use ciborium::value::Value; +use dpp::document::document_transition::INITIAL_REVISION; use drive::dpp::identity::Identity; use rand::rngs::StdRng; use rand::{Rng, SeedableRng}; @@ -76,6 +77,7 @@ fn create_test_mn_share_document( id, properties, owner_id: identity_id, + revision: INITIAL_REVISION as u64, }; let document_type = contract diff --git a/packages/rs-drive-abci/src/test/helpers/setup.rs b/packages/rs-drive-abci/src/test/helpers/setup.rs index f466bbaef12..c33b12e12a8 100644 --- a/packages/rs-drive-abci/src/test/helpers/setup.rs +++ b/packages/rs-drive-abci/src/test/helpers/setup.rs @@ -32,7 +32,6 @@ //! This module defines helper functions related to setting up Platform. //! -use crate::abci::messages::SystemIdentityPublicKeys; use crate::config::PlatformConfig; use crate::platform::Platform; use crate::test::fixture::abci::static_system_identity_public_keys; @@ -41,20 +40,28 @@ use tempfile::TempDir; /// A function which sets up Platform. 
pub fn setup_platform_raw(config: Option) -> Platform { let tmp_dir = TempDir::new().unwrap(); - let drive: Platform = + + let mut platform: Platform = Platform::open(tmp_dir, config).expect("should open Platform successfully"); - drive + #[cfg(feature = "fixtures-and-mocks")] + platform.mock_core_rpc_client(); + + platform } /// A function which sets up Platform with its initial state structure. pub fn setup_platform_with_initial_state_structure(config: Option) -> Platform { - let platform = setup_platform_raw(config); + let mut platform = setup_platform_raw(config); + platform .drive .create_initial_state_structure(None) .expect("should create root tree successfully"); + #[cfg(feature = "fixtures-and-mocks")] + platform.mock_core_rpc_client(); + platform } diff --git a/packages/rs-drive-abci/tests/strategy_tests/main.rs b/packages/rs-drive-abci/tests/strategy_tests/main.rs index 81536466da1..4744b9bba30 100644 --- a/packages/rs-drive-abci/tests/strategy_tests/main.rs +++ b/packages/rs-drive-abci/tests/strategy_tests/main.rs @@ -51,7 +51,6 @@ use drive::fee::credits::Credits; use drive::fee_pools::epochs::Epoch; use drive::query::DriveQuery; use drive_abci::abci::handlers::TenderdashAbci; -use drive_abci::abci::messages::{InitChainRequest, SystemIdentityPublicKeys}; use drive_abci::config::PlatformConfig; use drive_abci::execution::engine::ExecutionEvent; use drive_abci::execution::fee_pools::epoch::{EpochInfo, EPOCH_CHANGE_TIME_MS}; @@ -592,10 +591,10 @@ mod tests { upgrading_info: None, }; let config = PlatformConfig { - drive_config: Default::default(), verify_sum_trees: true, quorum_size: 100, validator_set_quorum_rotation_block_count: 25, + ..Default::default() }; run_chain_for_strategy(1000, 3000, strategy, config, 15); } @@ -613,10 +612,10 @@ mod tests { upgrading_info: None, }; let config = PlatformConfig { - drive_config: Default::default(), verify_sum_trees: true, quorum_size: 100, validator_set_quorum_rotation_block_count: 25, + ..Default::default() }; 
let outcome = run_chain_for_strategy(100, 3000, strategy, config, 15); @@ -636,10 +635,10 @@ mod tests { upgrading_info: None, }; let config = PlatformConfig { - drive_config: Default::default(), verify_sum_trees: true, quorum_size: 100, validator_set_quorum_rotation_block_count: 100, + ..Default::default() }; let day_in_ms = 1000 * 60 * 60 * 24; let outcome = run_chain_for_strategy(150, day_in_ms, strategy, config, 15); @@ -673,10 +672,10 @@ mod tests { upgrading_info: None, }; let config = PlatformConfig { - drive_config: Default::default(), verify_sum_trees: true, quorum_size: 100, validator_set_quorum_rotation_block_count: 25, + ..Default::default() }; run_chain_for_strategy(1, 3000, strategy, config, 15); } @@ -717,10 +716,10 @@ mod tests { upgrading_info: None, }; let config = PlatformConfig { - drive_config: Default::default(), verify_sum_trees: true, quorum_size: 100, validator_set_quorum_rotation_block_count: 25, + ..Default::default() }; run_chain_for_strategy(100, 3000, strategy, config, 15); } @@ -761,10 +760,10 @@ mod tests { upgrading_info: None, }; let config = PlatformConfig { - drive_config: Default::default(), verify_sum_trees: true, quorum_size: 100, validator_set_quorum_rotation_block_count: 100, + ..Default::default() }; let day_in_ms = 1000 * 60 * 60 * 24; let block_count = 120; @@ -833,10 +832,10 @@ mod tests { upgrading_info: None, }; let config = PlatformConfig { - drive_config: Default::default(), verify_sum_trees: true, quorum_size: 100, validator_set_quorum_rotation_block_count: 100, + ..Default::default() }; let day_in_ms = 1000 * 60 * 60 * 24; let block_count = 120; @@ -905,10 +904,10 @@ mod tests { upgrading_info: None, }; let config = PlatformConfig { - drive_config: Default::default(), verify_sum_trees: true, quorum_size: 100, validator_set_quorum_rotation_block_count: 100, + ..Default::default() }; let day_in_ms = 1000 * 60 * 60 * 24; let block_count = 120; @@ -977,10 +976,10 @@ mod tests { upgrading_info: None, }; let config = 
PlatformConfig { - drive_config: Default::default(), verify_sum_trees: true, quorum_size: 100, validator_set_quorum_rotation_block_count: 100, + ..Default::default() }; let day_in_ms = 1000 * 60 * 60 * 24; let block_count = 30; diff --git a/packages/rs-drive-abci/tests/strategy_tests/upgrade_fork_tests.rs b/packages/rs-drive-abci/tests/strategy_tests/upgrade_fork_tests.rs index 5e4c536db68..6026bc68ed3 100644 --- a/packages/rs-drive-abci/tests/strategy_tests/upgrade_fork_tests.rs +++ b/packages/rs-drive-abci/tests/strategy_tests/upgrade_fork_tests.rs @@ -25,10 +25,10 @@ mod tests { }), }; let config = PlatformConfig { - drive_config: Default::default(), verify_sum_trees: true, quorum_size: 100, validator_set_quorum_rotation_block_count: 125, + ..Default::default() }; let twenty_minutes_in_ms = 1000 * 60 * 20; let ChainExecutionOutcome { @@ -219,10 +219,10 @@ mod tests { }), }; let config = PlatformConfig { - drive_config: Default::default(), verify_sum_trees: true, quorum_size: 40, validator_set_quorum_rotation_block_count: 50, + ..Default::default() }; let hour_in_ms = 1000 * 60 * 60; let ChainExecutionOutcome { @@ -391,10 +391,10 @@ mod tests { }), }; let config = PlatformConfig { - drive_config: Default::default(), verify_sum_trees: true, quorum_size: 50, validator_set_quorum_rotation_block_count: 60, + ..Default::default() }; let hour_in_ms = 1000 * 60 * 60; let ChainExecutionOutcome { @@ -638,10 +638,10 @@ mod tests { }), }; let config = PlatformConfig { - drive_config: Default::default(), verify_sum_trees: true, quorum_size: 50, validator_set_quorum_rotation_block_count: 60, + ..Default::default() }; let hour_in_ms = 1000 * 60 * 60; let ChainExecutionOutcome { diff --git a/packages/rs-drive-nodejs/Cargo.toml b/packages/rs-drive-nodejs/Cargo.toml index b9a3a70ae5b..4b1bd9d9079 100644 --- a/packages/rs-drive-nodejs/Cargo.toml +++ b/packages/rs-drive-nodejs/Cargo.toml @@ -10,11 +10,14 @@ private = true crate-type = ["cdylib"] [dependencies] -drive = { path = 
"../rs-drive" } -drive-abci = { path = "../rs-drive-abci" } +drive = { path = "../rs-drive", features = ["fixtures-and-mocks"] } +drive-abci = { path = "../rs-drive-abci", features = ["fixtures-and-mocks"] } num = "0.4.0" [dependencies.neon] version = "0.10.1" default-features = false features = ["napi-6", "event-queue-api", "try-catch-api"] + +[features] +enable-mocking = [] diff --git a/packages/rs-drive-nodejs/Drive.js b/packages/rs-drive-nodejs/Drive.js index d5296aa8ff7..d1ac2239a74 100644 --- a/packages/rs-drive-nodejs/Drive.js +++ b/packages/rs-drive-nodejs/Drive.js @@ -34,7 +34,6 @@ const { driveRemoveFromIdentityBalance, driveApplyFeesToIdentityBalance, driveFetchLatestWithdrawalTransactionIndex, - driveEnqueueWithdrawalTransaction, abciInitChain, abciBlockBegin, abciBlockEnd, @@ -70,9 +69,6 @@ const driveProveDocumentsQueryAsync = appendStackAsync(promisify(driveProveDocum const driveFetchLatestWithdrawalTransactionIndexAsync = appendStackAsync( promisify(driveFetchLatestWithdrawalTransactionIndex), ); -const driveEnqueueWithdrawalTransactionAsync = appendStackAsync( - promisify(driveEnqueueWithdrawalTransaction), -); const driveInsertIdentityAsync = appendStackAsync(promisify(driveInsertIdentity)); const driveFetchIdentityAsync = appendStackAsync(promisify(driveFetchIdentity)); const driveFetchProvedIdentityAsync = appendStackAsync(promisify(driveFetchProvedIdentity)); @@ -736,31 +732,17 @@ class Drive { /** * Fetch the latest index of the withdrawal transaction in a queue * + * @param {RawBlockInfo} blockInfo * @param {boolean} [useTransaction=false] + * @param {boolean} [dryRun=false] * * @returns {Promise} */ - async fetchLatestWithdrawalTransactionIndex(useTransaction = false) { + async fetchLatestWithdrawalTransactionIndex(blockInfo, useTransaction = false, dryRun = false) { return driveFetchLatestWithdrawalTransactionIndexAsync.call( this.drive, - useTransaction, - ); - } - - /** - * Enqueue withdrawal transaction into the queue - * - * @param 
{number} index - * @param {Buffer} transactionBytes - * @param {boolean} [useTransaction=false] - * - * @returns {Promise} - */ - async enqueueWithdrawalTransaction(index, transactionBytes, useTransaction = false) { - return driveEnqueueWithdrawalTransactionAsync.call( - this.drive, - index, - transactionBytes, + blockInfo, + !dryRun, useTransaction, ); } @@ -908,6 +890,8 @@ Drive.FeeResult = FeeResult; * @property {number} [previousBlockTimeMs] - timestamp in milliseconds * @property {Buffer} proposerProTxHash * @property {Buffer} validatorSetQuorumHash + * @property {number} lastSyncedCoreHeight + * @property {number} coreChainLockedHeight, * @property {number} proposedAppVersion * @property {number} totalHpmns */ diff --git a/packages/rs-drive-nodejs/package.json b/packages/rs-drive-nodejs/package.json index 57461ffed31..7fc523c135b 100644 --- a/packages/rs-drive-nodejs/package.json +++ b/packages/rs-drive-nodejs/package.json @@ -5,7 +5,7 @@ "main": "Drive.js", "scripts": { "build": "yarn exec scripts/build.sh", - "test": "ultra --build && mocha test", + "test": "NODE_ENV=test ultra --build && mocha test", "lint": "eslint ." 
}, "files": [ @@ -18,6 +18,7 @@ "license": "MIT", "devDependencies": { "@dashevo/dashcore-lib": "github:dashevo/dashcore-lib#3527419e8739b5e7d4017028d642dba8851c3e25", + "@dashevo/withdrawals-contract": "workspace:*", "chai": "^4.3.4", "dirty-chai": "^2.0.1", "eslint": "^7.32.0", diff --git a/packages/rs-drive-nodejs/scripts/build.sh b/packages/rs-drive-nodejs/scripts/build.sh index dc316035f0e..6e169fdf681 100755 --- a/packages/rs-drive-nodejs/scripts/build.sh +++ b/packages/rs-drive-nodejs/scripts/build.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash PROFILE_ARG="" +FEATURE_FLAG="" if [ -n "$CARGO_BUILD_PROFILE" ]; then if [ "$CARGO_BUILD_PROFILE" == "release" ]; then @@ -10,7 +11,13 @@ if [ -n "$CARGO_BUILD_PROFILE" ]; then fi fi +if [ -n "$NODE_ENV" ]; then + if [ "$NODE_ENV" == "test" ]; then + FEATURE_FLAG="--features enable-mocking" + fi +fi + cargo-cp-artifact -ac drive-nodejs native/index.node -- \ - cargo build --message-format=json-render-diagnostics $PROFILE_ARG \ + cargo build --message-format=json-render-diagnostics $PROFILE_ARG $FEATURE_FLAG \ && neon-tag-prebuild \ && rm -rf native diff --git a/packages/rs-drive-nodejs/src/lib.rs b/packages/rs-drive-nodejs/src/lib.rs index b98d7affd60..07bc33ef6b0 100644 --- a/packages/rs-drive-nodejs/src/lib.rs +++ b/packages/rs-drive-nodejs/src/lib.rs @@ -1,15 +1,17 @@ mod converter; mod fee; +use drive::drive::config::DriveConfig; +use drive_abci::config::{CoreConfig, CoreRpcConfig, PlatformConfig}; + use std::ops::Deref; use std::{option::Option::None, path::Path, sync::mpsc, thread}; use crate::converter::js_object_to_fee_refunds; use crate::fee::result::FeeResultWrapper; + use drive::dpp::identity::{KeyID, TimestampMillis}; use drive::dpp::prelude::Revision; -use drive::drive::batch::GroveDbOpBatch; -use drive::drive::config::DriveConfig; use drive::drive::flags::StorageFlags; use drive::drive::query::QueryDocumentsOutcome; use drive::error::Error; @@ -22,7 +24,6 @@ use drive_abci::abci::messages::{ 
AfterFinalizeBlockRequest, BlockBeginRequest, BlockEndRequest, BlockFees, InitChainRequest, Serializable, }; -use drive_abci::config::PlatformConfig; use drive_abci::platform::Platform; use fee::js_calculate_storage_fee_distribution_amount_and_leftovers; use neon::prelude::*; @@ -63,7 +64,19 @@ impl PlatformWrapper { fn new(cx: &mut FunctionContext) -> NeonResult { // Drive's configuration let path_string = cx.argument::(0)?.value(cx); - let drive_config = cx.argument::(1)?; + let platform_config = cx.argument::(1)?; + + let drive_config: Handle = platform_config.get(cx, "drive")?; + let core_config: Handle = platform_config.get(cx, "core")?; + let core_rpc_config: Handle = core_config.get(cx, "rpc")?; + + let js_core_rpc_url: Handle = core_rpc_config.get(cx, "url")?; + let js_core_rpc_username: Handle = core_rpc_config.get(cx, "username")?; + let js_core_rpc_password: Handle = core_rpc_config.get(cx, "password")?; + + let core_rpc_url = js_core_rpc_url.value(cx); + let core_rpc_username = js_core_rpc_username.value(cx); + let core_rpc_password = js_core_rpc_password.value(cx); let js_data_contracts_cache_size: Handle = drive_config.get(cx, "dataContractsGlobalCacheSize")?; @@ -103,13 +116,27 @@ impl PlatformWrapper { ..Default::default() }; + let core_config = CoreConfig { + rpc: CoreRpcConfig { + url: core_rpc_url, + username: core_rpc_username, + password: core_rpc_password, + }, + }; + let platform_config = PlatformConfig { - drive_config, + drive: Some(drive_config), + core: core_config, + verify_sum_trees: true, ..Default::default() }; // TODO: think how to pass this error to JS - let platform: Platform = Platform::open(path, Some(platform_config)).unwrap(); + let mut platform: Platform = Platform::open(path, Some(platform_config)).unwrap(); + + if cfg!(feature = "enable-mocking") { + platform.mock_core_rpc_client(); + } let mut maybe_transaction: Option = None; @@ -2178,7 +2205,7 @@ impl PlatformWrapper { let result = 
transaction_result.and_then(|transaction_arg| { platform .drive - .query_documents_as_grove_proof( + .query_proof_of_documents_using_contract_id_using_cbor_encoded_query_with_cost( &query_cbor, contract_id, document_type_name.as_str(), @@ -3296,9 +3323,13 @@ impl PlatformWrapper { fn js_fetch_latest_withdrawal_transaction_index( mut cx: FunctionContext, ) -> JsResult { - let js_using_transaction = cx.argument::(0)?; - let js_callback = cx.argument::(1)?.root(&mut cx); + let js_block_info = cx.argument::(0)?; + let js_apply = cx.argument::(1)?; + let js_using_transaction = cx.argument::(2)?; + let js_callback = cx.argument::(3)?.root(&mut cx); + let apply = js_apply.value(&mut cx); + let block_info = converter::js_object_to_block_info(&mut cx, js_block_info)?; let using_transaction = js_using_transaction.value(&mut cx); let db = cx @@ -3317,10 +3348,27 @@ impl PlatformWrapper { }; let result = transaction_result.and_then(|transaction_arg| { + let mut drive_operation_types = vec![]; + + let result = platform + .drive + .fetch_and_remove_latest_withdrawal_transaction_index_operations( + &mut drive_operation_types, + transaction_arg, + ) + .map_err(|err| err.to_string())?; + platform .drive - .fetch_latest_withdrawal_transaction_index(transaction_arg) - .map_err(|err| err.to_string()) + .apply_drive_operations( + drive_operation_types, + apply, + &block_info, + transaction_arg, + ) + .map_err(|err| err.to_string())?; + + Ok(result) }); channel.send(move |mut task_context| { @@ -3355,76 +3403,6 @@ impl PlatformWrapper { // The result is returned through the callback, not through direct return Ok(cx.undefined()) } - - fn js_enqueue_withdrawal_transaction(mut cx: FunctionContext) -> JsResult { - let js_index = cx.argument::(0)?; - let js_core_transaction = cx.argument::(1)?; - let js_using_transaction = cx.argument::(2)?; - let js_callback = cx.argument::(3)?.root(&mut cx); - - let db = cx - .this() - .downcast_or_throw::, _>(&mut cx)?; - - let index = js_index.value(&mut 
cx); - let transaction_bytes = converter::js_buffer_to_vec_u8(&mut cx, js_core_transaction); - let using_transaction = js_using_transaction.value(&mut cx); - - db.send_to_drive_thread(move |platform: &Platform, transaction, channel| { - let transaction_result = if using_transaction { - if transaction.is_none() { - Err("transaction is not started".to_string()) - } else { - Ok(transaction) - } - } else { - Ok(None) - }; - - let mut batch = GroveDbOpBatch::new(); - - let index_bytes = (index as u64).to_be_bytes().to_vec(); - - let withdrawals = vec![(index_bytes.clone(), transaction_bytes)]; - - platform - .drive - .add_enqueue_withdrawal_transaction_operations(&mut batch, withdrawals); - - platform - .drive - .add_update_withdrawal_index_counter_operation(&mut batch, index_bytes); - - let result = transaction_result.and_then(|transaction_arg| { - platform - .drive - .grove_apply_batch(batch, false, transaction_arg) - .map_err(|err| err.to_string()) - }); - - channel.send(move |mut task_context| { - let callback = js_callback.into_inner(&mut task_context); - let this = task_context.undefined(); - - let callback_arguments: Vec> = match result { - Ok(_) => { - vec![task_context.null().upcast(), task_context.null().upcast()] - } - - // Convert the error to a JavaScript exception on failure - Err(err) => vec![task_context.error(err)?.upcast()], - }; - - callback.call(&mut task_context, this, callback_arguments)?; - - Ok(()) - }); - }) - .or_else(|err| cx.throw_error(err.to_string()))?; - - // The result is returned through the callback, not through direct return - Ok(cx.undefined()) - } } #[neon::main] @@ -3518,10 +3496,6 @@ fn main(mut cx: ModuleContext) -> NeonResult<()> { "driveFetchLatestWithdrawalTransactionIndex", PlatformWrapper::js_fetch_latest_withdrawal_transaction_index, )?; - cx.export_function( - "driveEnqueueWithdrawalTransaction", - PlatformWrapper::js_enqueue_withdrawal_transaction, - )?; cx.export_function("groveDbInsert", 
PlatformWrapper::js_grove_db_insert)?; cx.export_function( diff --git a/packages/rs-drive-nodejs/test/Drive.spec.js b/packages/rs-drive-nodejs/test/Drive.spec.js index 523b898f3d5..931d7d34a07 100644 --- a/packages/rs-drive-nodejs/test/Drive.spec.js +++ b/packages/rs-drive-nodejs/test/Drive.spec.js @@ -5,11 +5,18 @@ const { PrivateKey } = require('@dashevo/dashcore-lib'); const { expect, use } = require('chai'); use(require('dirty-chai')); +const DashPlatformProtocol = require('@dashevo/dpp'); + const Document = require('@dashevo/dpp/lib/document/Document'); +const Identifier = require('@dashevo/dpp/lib/Identifier'); const getDataContractFixture = require('@dashevo/dpp/lib/test/fixtures/getDataContractFixture'); const getDocumentsFixture = require('@dashevo/dpp/lib/test/fixtures/getDocumentsFixture'); const getIdentityFixture = require('@dashevo/dpp/lib/test/fixtures/getIdentityFixture'); +const generateRandomIdentifier = require('@dashevo/dpp/lib/test/utils/generateRandomIdentifier'); + +const withdrawalContractDocumentsSchema = require('@dashevo/withdrawals-contract/schema/withdrawals-documents.json'); +const withdrawalContractIds = require('@dashevo/withdrawals-contract/lib/systemIds'); const { expectFeeResult, @@ -21,20 +28,47 @@ const FeeResult = require('../FeeResult'); const TEST_DATA_PATH = './test_data'; -describe('Drive', () => { +describe('Drive', function main() { + this.timeout(10000); + let drive; let dataContract; let identity; let blockInfo; let documents; let initialRootHash; + let withdrawalsDataContract; beforeEach(async () => { drive = new Drive(TEST_DATA_PATH, { - dataContractsGlobalCacheSize: 500, - dataContractsBlockCacheSize: 500, + drive: { + dataContractsGlobalCacheSize: 500, + dataContractsBlockCacheSize: 500, + }, + core: { + rpc: { + url: '127.0.0.1', + username: '', + password: '', + }, + }, + }); + + const dpp = new DashPlatformProtocol({ + stateRepository: { + fetchDataContract: () => { }, + }, }); + await dpp.initialize(); + + 
withdrawalsDataContract = dpp.dataContract.create( + generateRandomIdentifier(), + withdrawalContractDocumentsSchema, + ); + + withdrawalsDataContract.id = Identifier.from(withdrawalContractIds.contractId); + dataContract = getDataContractFixture(); identity = getIdentityFixture(); @@ -952,26 +986,16 @@ describe('Drive', () => { }); it('should return 0 on the first call', async () => { - const result = await drive.fetchLatestWithdrawalTransactionIndex(); + const result = await drive.fetchLatestWithdrawalTransactionIndex({ + height: 1, + epoch: 1, + timeMs: (new Date()).getTime(), + }); expect(result).to.equal(0); }); }); - describe('#enqueueWithdrawalTransaction', () => { - beforeEach(async () => { - await drive.createInitialStateStructure(); - }); - - it('should enqueue withdrawal transaction into the queue', async () => { - await drive.enqueueWithdrawalTransaction(1, Buffer.alloc(32, 1)); - - const result = await drive.fetchLatestWithdrawalTransactionIndex(); - - expect(result).to.equal(1); - }); - }); - describe('ABCI', () => { let initChainRequest; @@ -1015,6 +1039,8 @@ describe('Drive', () => { proposerProTxHash: Buffer.alloc(32, 1), proposedAppVersion: 1, validatorSetQuorumHash: Buffer.alloc(32, 2), + lastSyncedCoreHeight: 1, + coreChainLockedHeight: 1, totalHpmns: 100, }; @@ -1037,6 +1063,8 @@ describe('Drive', () => { proposerProTxHash: Buffer.alloc(32, 1), proposedAppVersion: 1, validatorSetQuorumHash: Buffer.alloc(32, 2), + lastSyncedCoreHeight: 1, + coreChainLockedHeight: 1, totalHpmns: 100, }); @@ -1047,6 +1075,8 @@ describe('Drive', () => { previousBlockTimeMs: blockTimeMs, proposedAppVersion: 1, validatorSetQuorumHash: Buffer.alloc(32, 2), + lastSyncedCoreHeight: 1, + coreChainLockedHeight: 1, totalHpmns: 100, }); @@ -1069,6 +1099,8 @@ describe('Drive', () => { proposedAppVersion: 1, proposerProTxHash: Buffer.alloc(32, 1), validatorSetQuorumHash: Buffer.alloc(32, 2), + lastSyncedCoreHeight: 1, + coreChainLockedHeight: 1, totalHpmns: 100, }); }); @@ 
-1078,7 +1110,7 @@ describe('Drive', () => { fees: { storageFee: 0, processingFee: 0, - refundsPerEpoch: { }, + refundsPerEpoch: {}, }, }; @@ -1096,12 +1128,17 @@ describe('Drive', () => { await drive.getAbci().initChain(initChainRequest); + await drive.createContract(dataContract, blockInfo); + await drive.createContract(withdrawalsDataContract, blockInfo); + await drive.getAbci().blockBegin({ blockHeight: 1, blockTimeMs: (new Date()).getTime(), proposedAppVersion: 1, proposerProTxHash: Buffer.alloc(32, 1), validatorSetQuorumHash: Buffer.alloc(32, 2), + lastSyncedCoreHeight: 1, + coreChainLockedHeight: 1, totalHpmns: 100, }); diff --git a/packages/rs-drive-nodejs/test/GroveDB.spec.js b/packages/rs-drive-nodejs/test/GroveDB.spec.js index eb00db9a5a8..0e2000d161a 100644 --- a/packages/rs-drive-nodejs/test/GroveDB.spec.js +++ b/packages/rs-drive-nodejs/test/GroveDB.spec.js @@ -19,8 +19,17 @@ describe('GroveDB', () => { beforeEach(() => { drive = new Drive(TEST_DATA_PATH, { - dataContractsGlobalCacheSize: 500, - dataContractsBlockCacheSize: 500, + drive: { + dataContractsGlobalCacheSize: 500, + dataContractsBlockCacheSize: 500, + }, + core: { + rpc: { + url: '127.0.0.1', + username: '', + password: '', + }, + }, }); groveDb = drive.getGroveDB(); diff --git a/packages/rs-drive/Cargo.toml b/packages/rs-drive/Cargo.toml index 60557d245ca..50cec9736e6 100644 --- a/packages/rs-drive/Cargo.toml +++ b/packages/rs-drive/Cargo.toml @@ -28,9 +28,10 @@ intmap = { version="2.0.0", features=["serde"] } nohash-hasher = "0.2.0" chrono = "0.4.20" bincode = "1.3.3" -dpp = { path = "../rs-dpp" } +dpp = { path = "../rs-dpp", features = ["fixtures-and-mocks"]} itertools = { version = "0.10.5" } -dashcore = { git="https://github.com/dashpay/rust-dashcore", features=["no-std", "secp-recovery", "rand", "signer"], default-features = false, branch="master" } +dashcore = { git="https://github.com/dashevo/rust-dashcore", features=["std", "secp-recovery", "rand", "signer", "use-serde"], 
default-features = false, rev = "51548a4a1b9eca7430f5f3caf94d9784886ff2e9" } +mockall= { version ="0.11", optional = true } rust_decimal = "1.2.5" rust_decimal_macros = "1.25.0" lazy_static = "1.4.0" @@ -49,8 +50,11 @@ rev = "3f07f53175d99e4a3eab7b53d8bd221f59ea9047" [dev-dependencies] criterion = "0.3.5" -dpp = { path = "../rs-dpp", features=["mocks"]} [[bench]] name = "benchmarks" harness = false + +[features] +default = ["fixtures-and-mocks"] +fixtures-and-mocks = ["mockall"] diff --git a/packages/rs-drive/benches/benchmarks.rs b/packages/rs-drive/benches/benchmarks.rs index 2f45c04880c..e88c3ac3856 100644 --- a/packages/rs-drive/benches/benchmarks.rs +++ b/packages/rs-drive/benches/benchmarks.rs @@ -39,7 +39,6 @@ use dpp::data_contract::DriveContractExt; use dpp::document::document_stub::DocumentStub; use drive::contract::Contract; use drive::contract::CreateRandomDocument; -use serde::Serialize; criterion_main!(serialization, deserialization); criterion_group!(serialization, test_drive_10_serialization); diff --git a/packages/rs-drive/src/common/helpers/mod.rs b/packages/rs-drive/src/common/helpers/mod.rs index ece28e30146..990fed2c0e5 100644 --- a/packages/rs-drive/src/common/helpers/mod.rs +++ b/packages/rs-drive/src/common/helpers/mod.rs @@ -1,3 +1,2 @@ pub mod epoch; pub mod identities; -pub mod setup; diff --git a/packages/rs-drive/src/drive/batch/drive_op_batch/document.rs b/packages/rs-drive/src/drive/batch/drive_op_batch/document.rs index 8bafe9489f7..065fde57c5e 100644 --- a/packages/rs-drive/src/drive/batch/drive_op_batch/document.rs +++ b/packages/rs-drive/src/drive/batch/drive_op_batch/document.rs @@ -229,7 +229,7 @@ impl DriveOperationConverter for DocumentOperationType<'_> { document_info, owner_id, }, - contract: &contract, + contract, document_type, }; drive.add_document_for_contract_operations( @@ -348,7 +348,7 @@ impl DriveOperationConverter for DocumentOperationType<'_> { document_info, owner_id, }, - contract: &contract, + contract, 
document_type, }; drive.update_document_for_contract_operations( @@ -377,7 +377,7 @@ impl DriveOperationConverter for DocumentOperationType<'_> { document_info, owner_id, }, - contract: &contract, + contract, document_type, }; drive.update_document_for_contract_operations( diff --git a/packages/rs-drive/src/drive/batch/drive_op_batch/mod.rs b/packages/rs-drive/src/drive/batch/drive_op_batch/mod.rs index 41ec8dc50cd..8dc769646ca 100644 --- a/packages/rs-drive/src/drive/batch/drive_op_batch/mod.rs +++ b/packages/rs-drive/src/drive/batch/drive_op_batch/mod.rs @@ -31,24 +31,30 @@ mod contract; mod document; mod identity; mod system; +mod withdrawals; use crate::drive::batch::GroveDbOpBatch; use crate::drive::block_info::BlockInfo; - use crate::drive::Drive; use crate::error::Error; use crate::fee::calculate_fee; use crate::fee::op::DriveOperation; use crate::fee::result::FeeResult; + pub use contract::ContractOperationType; +pub use document::DocumentOperation; pub use document::DocumentOperationType; +pub use document::DocumentOperationsForContractDocumentType; +pub use document::UpdateOperationInfo; +pub use identity::IdentityOperationType; +pub use system::SystemOperationType; +pub use withdrawals::WithdrawalOperationType; -use grovedb::batch::{GroveDbOp, KeyInfoPath}; use grovedb::{EstimatedLayerInformation, TransactionArg}; -pub use identity::IdentityOperationType; + +use grovedb::batch::{GroveDbOp, KeyInfoPath}; use itertools::Itertools; use std::collections::HashMap; -pub use system::SystemOperationType; /// A converter that will get Drive Operations from High Level Operations pub trait DriveOperationConverter { @@ -71,6 +77,8 @@ pub enum DriveOperationType<'a> { ContractOperation(ContractOperationType<'a>), /// A document operation DocumentOperation(DocumentOperationType<'a>), + /// Withdrawal operation + WithdrawalOperation(WithdrawalOperationType<'a>), /// An identity operation IdentityOperation(IdentityOperationType), /// A system operation @@ -104,6 +112,14 
@@ impl DriveOperationConverter for DriveOperationType<'_> { transaction, ) } + DriveOperationType::WithdrawalOperation(withdrawal_operation_type) => { + withdrawal_operation_type.to_drive_operations( + drive, + estimated_costs_only_with_layer_info, + block_info, + transaction, + ) + } DriveOperationType::IdentityOperation(identity_operation_type) => { identity_operation_type.to_drive_operations( drive, @@ -152,6 +168,9 @@ impl Drive { block_info: &BlockInfo, transaction: TransactionArg, ) -> Result { + if operations.is_empty() { + return Ok(FeeResult::default()); + } let mut drive_operations = vec![]; let mut estimated_costs_only_with_layer_info = if apply { None::> diff --git a/packages/rs-drive/src/drive/batch/drive_op_batch/withdrawals.rs b/packages/rs-drive/src/drive/batch/drive_op_batch/withdrawals.rs new file mode 100644 index 00000000000..c674246d859 --- /dev/null +++ b/packages/rs-drive/src/drive/batch/drive_op_batch/withdrawals.rs @@ -0,0 +1,154 @@ +use std::collections::HashMap; + +use grovedb::Element; +use grovedb::{batch::KeyInfoPath, EstimatedLayerInformation, TransactionArg}; + +use crate::drive::grove_operations::BatchDeleteApplyType; +use crate::drive::identity::withdrawals::paths::{ + get_withdrawal_root_path_vec, get_withdrawal_transactions_expired_ids_path, + get_withdrawal_transactions_expired_ids_path_vec, get_withdrawal_transactions_queue_path, + get_withdrawal_transactions_queue_path_vec, WITHDRAWAL_TRANSACTIONS_COUNTER_ID, +}; +use crate::drive::identity::withdrawals::WithdrawalTransactionIdAndBytes; +use crate::drive::object_size_info::PathKeyElementInfo; +use crate::{ + drive::{block_info::BlockInfo, Drive}, + error::Error, + fee::op::DriveOperation, +}; + +use super::DriveOperationConverter; + +/// Operations for Withdrawals +#[derive(Clone, Debug)] +pub enum WithdrawalOperationType<'a> { + /// Inserts expired index into it's tree + InsertExpiredIndex { + /// index value + index: u64, + }, + /// Removes expired index from the tree + 
DeleteExpiredIndex { + /// index value + key: Vec, + }, + /// Update index counter + UpdateIndexCounter { + /// index counter value + index: u64, + }, + /// Insert Core Transaction into queue + InsertTransactions { + /// transaction id bytes + withdrawal_transactions: &'a [WithdrawalTransactionIdAndBytes], + }, + /// Delete withdrawal + DeleteWithdrawalTransaction { + /// withdrawal transaction tuple with id and bytes + id: Vec, + }, +} + +impl DriveOperationConverter for WithdrawalOperationType<'_> { + fn to_drive_operations( + self, + drive: &Drive, + _estimated_costs_only_with_layer_info: &mut Option< + HashMap, + >, + _block_info: &BlockInfo, + transaction: TransactionArg, + ) -> Result, Error> { + match self { + WithdrawalOperationType::InsertExpiredIndex { index } => { + let mut drive_operations = vec![]; + + let index_bytes = index.to_be_bytes(); + + let path = get_withdrawal_transactions_expired_ids_path_vec(); + + drive.batch_insert( + PathKeyElementInfo::PathKeyElement::<'_, 1>(( + path, + index_bytes.to_vec(), + Element::Item(vec![], None), + )), + &mut drive_operations, + )?; + + Ok(drive_operations) + } + WithdrawalOperationType::DeleteExpiredIndex { key } => { + let mut drive_operations = vec![]; + + let path: [&[u8]; 2] = get_withdrawal_transactions_expired_ids_path(); + + drive.batch_delete( + path, + &key, + BatchDeleteApplyType::StatefulBatchDelete { + is_known_to_be_subtree_with_sum: Some((false, false)), + }, + transaction, + &mut drive_operations, + )?; + + Ok(drive_operations) + } + WithdrawalOperationType::UpdateIndexCounter { index } => { + let mut drive_operations = vec![]; + + let path = get_withdrawal_root_path_vec(); + + drive.batch_insert( + PathKeyElementInfo::PathKeyRefElement::<'_, 1>(( + path, + &WITHDRAWAL_TRANSACTIONS_COUNTER_ID, + Element::Item(index.to_be_bytes().to_vec(), None), + )), + &mut drive_operations, + )?; + + Ok(drive_operations) + } + WithdrawalOperationType::InsertTransactions { + withdrawal_transactions, + } => { + 
let mut drive_operations = vec![]; + + let path = get_withdrawal_transactions_queue_path_vec(); + + for (id, bytes) in withdrawal_transactions { + drive.batch_insert( + PathKeyElementInfo::PathKeyElement::<'_, 1>(( + path.clone(), + id.clone(), + Element::Item(bytes.clone(), None), + )), + &mut drive_operations, + )?; + } + + Ok(drive_operations) + } + WithdrawalOperationType::DeleteWithdrawalTransaction { id } => { + let mut drive_operations = vec![]; + + let path = get_withdrawal_transactions_queue_path(); + + drive.batch_delete( + path, + &id, + // we know that we are not deleting a subtree + BatchDeleteApplyType::StatefulBatchDelete { + is_known_to_be_subtree_with_sum: Some((false, false)), + }, + transaction, + &mut drive_operations, + )?; + + Ok(drive_operations) + } + } + } +} diff --git a/packages/rs-drive/src/drive/contract/mod.rs b/packages/rs-drive/src/drive/contract/mod.rs index b79470dc7ea..b4dd0adbdc3 100644 --- a/packages/rs-drive/src/drive/contract/mod.rs +++ b/packages/rs-drive/src/drive/contract/mod.rs @@ -1037,7 +1037,7 @@ mod tests { use crate::drive::Drive; use dpp::data_contract::extra::common::json_document_to_cbor; - use crate::common::helpers::setup::setup_drive_with_initial_state_structure; + use crate::tests::helpers::setup::setup_drive_with_initial_state_structure; fn setup_deep_nested_50_contract() -> (Drive, Contract, Vec) { // Todo: make TempDir based on _prefix diff --git a/packages/rs-drive/src/drive/document/update.rs b/packages/rs-drive/src/drive/document/update.rs index e245b17d842..17eab0a6f39 100644 --- a/packages/rs-drive/src/drive/document/update.rs +++ b/packages/rs-drive/src/drive/document/update.rs @@ -35,6 +35,7 @@ use std::borrow::Cow; use std::collections::{HashMap, HashSet}; +use dpp::data_contract::document_type::DocumentType; use dpp::data_contract::DriveContractExt; use grovedb::batch::key_info::KeyInfo; use grovedb::batch::key_info::KeyInfo::KnownKey; @@ -42,6 +43,10 @@ use grovedb::batch::KeyInfoPath; use 
grovedb::{Element, EstimatedLayerInformation, TransactionArg}; use crate::contract::Contract; +use crate::drive::batch::drive_op_batch::{ + DocumentOperation, DocumentOperationsForContractDocumentType, UpdateOperationInfo, +}; +use crate::drive::batch::{DocumentOperationType, DriveOperationType}; use crate::drive::defaults::CONTRACT_DOCUMENTS_PATH_HEIGHT; use crate::drive::document::{ contract_document_type_path, @@ -73,6 +78,7 @@ use crate::drive::grove_operations::{ QueryType, }; use crate::fee::result::FeeResult; +use dpp::prelude::DataContract; impl Drive { /// Updates a serialized document given a contract CBOR and returns the associated fee. @@ -295,126 +301,181 @@ impl Drive { let contract = document_and_contract_info.contract; let document_type = document_and_contract_info.document_type; let owner_id = document_and_contract_info.owned_document_info.owner_id; + let Some((document, storage_flags)) = document_and_contract_info.owned_document_info.document_info.get_borrowed_document_and_storage_flags() else { + return Err(Error::Drive(DriveError::CorruptedCodeExecution("must have document and storage flags"))); + }; + // we need to construct the path for documents on the contract + // the path is + // * Document and Contract root tree + // * Contract ID recovered from document + // * 0 to signify Documents and not Contract + let contract_document_type_path = + contract_document_type_path(contract.id.as_bytes(), document_type.name.as_str()); + + let contract_documents_primary_key_path = contract_documents_primary_key_path( + contract.id.as_bytes(), + document_type.name.as_str(), + ); - if let DocumentRefAndSerialization((document, _serialized_document, ref storage_flags)) = - document_and_contract_info.owned_document_info.document_info - { - // we need to construct the path for documents on the contract - // the path is - // * Document and Contract root tree - // * Contract ID recovered from document - // * 0 to signify Documents and not Contract - let 
contract_document_type_path = - contract_document_type_path(contract.id.as_bytes(), document_type.name.as_str()); - - let contract_documents_primary_key_path = contract_documents_primary_key_path( - contract.id.as_bytes(), - document_type.name.as_str(), - ); - - let document_reference = make_document_reference( - document, - document_and_contract_info.document_type, - storage_flags.as_ref().map(|flags| flags.as_ref()), - ); + let document_reference = make_document_reference( + document, + document_and_contract_info.document_type, + storage_flags, + ); - // next we need to get the old document from storage - let old_document_element = if document_type.documents_keep_history { - let contract_documents_keeping_history_primary_key_path_for_document_id = - contract_documents_keeping_history_primary_key_path_for_document_id( - contract.id.as_bytes(), - document_type.name.as_str(), - document.id.as_slice(), - ); - // When keeping document history the 0 is a reference that points to the current value - // O is just on one byte, so we have at most one hop of size 1 (1 byte) - self.grove_get( - contract_documents_keeping_history_primary_key_path_for_document_id, - &[0], - QueryType::StatefulQuery, - transaction, - &mut batch_operations, - )? - } else { - self.grove_get_raw( - contract_documents_primary_key_path, + // next we need to get the old document from storage + let old_document_element = if document_type.documents_keep_history { + let contract_documents_keeping_history_primary_key_path_for_document_id = + contract_documents_keeping_history_primary_key_path_for_document_id( + contract.id.as_bytes(), + document_type.name.as_str(), document.id.as_slice(), - DirectQueryType::StatefulDirectQuery, - transaction, - &mut batch_operations, - )? 
- }; - - // we need to store the document for it's primary key - // we should be overriding if the document_type does not have history enabled - self.add_document_to_primary_storage( - &document_and_contract_info, - block_info, - true, - estimated_costs_only_with_layer_info, + ); + // When keeping document history the 0 is a reference that points to the current value + // O is just on one byte, so we have at most one hop of size 1 (1 byte) + self.grove_get( + contract_documents_keeping_history_primary_key_path_for_document_id, + &[0], + QueryType::StatefulQuery, transaction, &mut batch_operations, - )?; - - let old_document_info = if let Some(old_document_element) = old_document_element { - if let Element::Item(old_serialized_document, element_flags) = old_document_element - { - let document = DocumentStub::from_cbor( - old_serialized_document.as_slice(), - None, - owner_id, - )?; - let storage_flags = StorageFlags::map_some_element_flags_ref(&element_flags)?; - Ok(DocumentWithoutSerialization(( - document, - storage_flags.map(Cow::Owned), - ))) - } else { - Err(Error::Drive(DriveError::CorruptedDocumentNotItem( - "old document is not an item", - ))) - }? + )? + } else { + self.grove_get_raw( + contract_documents_primary_key_path, + document.id.as_slice(), + DirectQueryType::StatefulDirectQuery, + transaction, + &mut batch_operations, + )? 
+ }; + + // we need to store the document for it's primary key + // we should be overriding if the document_type does not have history enabled + self.add_document_to_primary_storage( + &document_and_contract_info, + block_info, + true, + estimated_costs_only_with_layer_info, + transaction, + &mut batch_operations, + )?; + + let old_document_info = if let Some(old_document_element) = old_document_element { + if let Element::Item(old_serialized_document, element_flags) = old_document_element { + let document_result = + DocumentStub::from_cbor(old_serialized_document.as_slice(), None, owner_id); + let document = match document_result { + Ok(document_result) => Ok(document_result), + Err(_) => { + DocumentStub::from_bytes(old_serialized_document.as_slice(), document_type) + } + }?; + let storage_flags = StorageFlags::map_some_element_flags_ref(&element_flags)?; + Ok(DocumentWithoutSerialization(( + document, + storage_flags.map(Cow::Owned), + ))) } else { - return Err(Error::Drive(DriveError::UpdatingDocumentThatDoesNotExist( - "document being updated does not exist", - ))); + Err(Error::Drive(DriveError::CorruptedDocumentNotItem( + "old document is not an item", + ))) + }? 
+ } else { + return Err(Error::Drive(DriveError::UpdatingDocumentThatDoesNotExist( + "document being updated does not exist", + ))); + }; + + let mut batch_insertion_cache: HashSet>> = HashSet::new(); + // fourth we need to store a reference to the document for each index + for index in &document_type.indices { + // at this point the contract path is to the contract documents + // for each index the top index component will already have been added + // when the contract itself was created + let mut index_path: Vec> = contract_document_type_path + .iter() + .map(|&x| Vec::from(x)) + .collect(); + let top_index_property = index.properties.get(0).ok_or(Error::Drive( + DriveError::CorruptedContractIndexes("invalid contract indices"), + ))?; + index_path.push(Vec::from(top_index_property.name.as_bytes())); + + // with the example of the dashpay contract's first index + // the index path is now something like Contracts/ContractID/Documents(1)/$ownerId + let document_top_field = document + .get_raw_for_document_type(&top_index_property.name, document_type, owner_id)? + .unwrap_or_default(); + + let old_document_top_field = old_document_info + .get_raw_for_document_type(&top_index_property.name, document_type, owner_id, None)? 
+ .unwrap_or_default(); + + // if we are not applying that means we are trying to get worst case costs + // which would entail a change on every index + let mut change_occurred_on_index = match &old_document_top_field { + DriveKeyInfo::Key(k) => &document_top_field != k, + DriveKeyInfo::KeyRef(k) => document_top_field.as_slice() != *k, + DriveKeyInfo::KeySize(_) => { + // we should assume true in this worst case cost scenario + true + } }; - let mut batch_insertion_cache: HashSet>> = HashSet::new(); - // fourth we need to store a reference to the document for each index - for index in &document_type.indices { - // at this point the contract path is to the contract documents - // for each index the top index component will already have been added - // when the contract itself was created - let mut index_path: Vec> = contract_document_type_path - .iter() - .map(|&x| Vec::from(x)) - .collect(); - let top_index_property = index.properties.get(0).ok_or(Error::Drive( + if change_occurred_on_index { + // here we are inserting an empty tree that will have a subtree of all other index properties + let mut qualified_path = index_path.clone(); + qualified_path.push(document_top_field.clone()); + + if !batch_insertion_cache.contains(&qualified_path) { + let inserted = self.batch_insert_empty_tree_if_not_exists( + PathKeyInfo::PathKeyRef::<0>(( + index_path.clone(), + document_top_field.as_slice(), + )), + storage_flags, + BatchInsertTreeApplyType::StatefulBatchInsertTree, + transaction, + previous_batch_operations, + &mut batch_operations, + )?; + if inserted { + batch_insertion_cache.insert(qualified_path); + } + } + } + + let mut all_fields_null = document_top_field.is_empty(); + + let mut old_index_path: Vec = index_path + .iter() + .map(|path_item| DriveKeyInfo::Key(path_item.clone())) + .collect(); + // we push the actual value of the index path + index_path.push(document_top_field); + // the index path is now something like Contracts/ContractID/Documents(1)/$ownerId/ + + 
old_index_path.push(old_document_top_field); + + for i in 1..index.properties.len() { + let index_property = index.properties.get(i).ok_or(Error::Drive( DriveError::CorruptedContractIndexes("invalid contract indices"), ))?; - index_path.push(Vec::from(top_index_property.name.as_bytes())); - // with the example of the dashpay contract's first index - // the index path is now something like Contracts/ContractID/Documents(1)/$ownerId - let document_top_field = document - .get_raw_for_document_type(&top_index_property.name, document_type, owner_id)? + let document_index_field = document + .get_raw_for_document_type(&index_property.name, document_type, owner_id)? .unwrap_or_default(); - let old_document_top_field = old_document_info - .get_raw_for_document_type( - &top_index_property.name, - document_type, - owner_id, - None, - )? + let old_document_index_field = old_document_info + .get_raw_for_document_type(&index_property.name, document_type, owner_id, None)? .unwrap_or_default(); // if we are not applying that means we are trying to get worst case costs // which would entail a change on every index - let mut change_occurred_on_index = match &old_document_top_field { - DriveKeyInfo::Key(k) => &document_top_field != k, - DriveKeyInfo::KeyRef(k) => document_top_field.as_slice() != *k, + change_occurred_on_index |= match &old_document_index_field { + DriveKeyInfo::Key(k) => &document_index_field != k, + DriveKeyInfo::KeyRef(k) => document_index_field != *k, DriveKeyInfo::KeySize(_) => { // we should assume true in this worst case cost scenario true @@ -423,16 +484,17 @@ impl Drive { if change_occurred_on_index { // here we are inserting an empty tree that will have a subtree of all other index properties + let mut qualified_path = index_path.clone(); - qualified_path.push(document_top_field.clone()); + qualified_path.push(index_property.name.as_bytes().to_vec()); if !batch_insertion_cache.contains(&qualified_path) { let inserted = 
self.batch_insert_empty_tree_if_not_exists( PathKeyInfo::PathKeyRef::<0>(( index_path.clone(), - document_top_field.as_slice(), + index_property.name.as_bytes(), )), - storage_flags.as_ref().map(|flags| flags.as_ref()), + storage_flags, BatchInsertTreeApplyType::StatefulBatchInsertTree, transaction, previous_batch_operations, @@ -444,200 +506,166 @@ impl Drive { } } - let mut all_fields_null = document_top_field.is_empty(); - - let mut old_index_path: Vec = index_path - .iter() - .map(|path_item| DriveKeyInfo::Key(path_item.clone())) - .collect(); - // we push the actual value of the index path - index_path.push(document_top_field); - // the index path is now something like Contracts/ContractID/Documents(1)/$ownerId/ - - old_index_path.push(old_document_top_field); - - for i in 1..index.properties.len() { - let index_property = index.properties.get(i).ok_or(Error::Drive( - DriveError::CorruptedContractIndexes("invalid contract indices"), - ))?; - - let document_index_field = document - .get_raw_for_document_type(&index_property.name, document_type, owner_id)? - .unwrap_or_default(); - - let old_document_index_field = old_document_info - .get_raw_for_document_type( - &index_property.name, - document_type, - owner_id, - None, - )? 
- .unwrap_or_default(); - - // if we are not applying that means we are trying to get worst case costs - // which would entail a change on every index - change_occurred_on_index |= match &old_document_index_field { - DriveKeyInfo::Key(k) => &document_index_field != k, - DriveKeyInfo::KeyRef(k) => document_index_field != *k, - DriveKeyInfo::KeySize(_) => { - // we should assume true in this worst case cost scenario - true - } - }; - - if change_occurred_on_index { - // here we are inserting an empty tree that will have a subtree of all other index properties - - let mut qualified_path = index_path.clone(); - qualified_path.push(index_property.name.as_bytes().to_vec()); - - if !batch_insertion_cache.contains(&qualified_path) { - let inserted = self.batch_insert_empty_tree_if_not_exists( - PathKeyInfo::PathKeyRef::<0>(( - index_path.clone(), - index_property.name.as_bytes(), - )), - storage_flags.as_ref().map(|flags| flags.as_ref()), - BatchInsertTreeApplyType::StatefulBatchInsertTree, - transaction, - previous_batch_operations, - &mut batch_operations, - )?; - if inserted { - batch_insertion_cache.insert(qualified_path); - } - } - } - - index_path.push(Vec::from(index_property.name.as_bytes())); - old_index_path - .push(DriveKeyInfo::Key(Vec::from(index_property.name.as_bytes()))); - - // Iteration 1. the index path is now something like Contracts/ContractID/Documents(1)/$ownerId//toUserId - // Iteration 2. 
the index path is now something like Contracts/ContractID/Documents(1)/$ownerId//toUserId//accountReference - - if change_occurred_on_index { - // here we are inserting an empty tree that will have a subtree of all other index properties - - let mut qualified_path = index_path.clone(); - qualified_path.push(document_index_field.clone()); - - if !batch_insertion_cache.contains(&qualified_path) { - let inserted = self.batch_insert_empty_tree_if_not_exists( - PathKeyInfo::PathKeyRef::<0>(( - index_path.clone(), - document_index_field.as_slice(), - )), - storage_flags.as_ref().map(|flags| flags.as_ref()), - BatchInsertTreeApplyType::StatefulBatchInsertTree, - transaction, - previous_batch_operations, - &mut batch_operations, - )?; - if inserted { - batch_insertion_cache.insert(qualified_path); - } - } - } - - all_fields_null &= document_index_field.is_empty(); + index_path.push(Vec::from(index_property.name.as_bytes())); + old_index_path.push(DriveKeyInfo::Key(Vec::from(index_property.name.as_bytes()))); - // we push the actual value of the index path, both for the new and the old - index_path.push(document_index_field); - old_index_path.push(old_document_index_field); - // Iteration 1. the index path is now something like Contracts/ContractID/Documents(1)/$ownerId//toUserId// - // Iteration 2. the index path is now something like Contracts/ContractID/Documents(1)/$ownerId//toUserId//accountReference/ - } + // Iteration 1. the index path is now something like Contracts/ContractID/Documents(1)/$ownerId//toUserId + // Iteration 2. 
the index path is now something like Contracts/ContractID/Documents(1)/$ownerId//toUserId//accountReference if change_occurred_on_index { - // we first need to delete the old values - // unique indexes will be stored under key "0" - // non unique indices should have a tree at key "0" that has all elements based off of primary key - - let mut key_info_path = KeyInfoPath::from_vec( - old_index_path - .into_iter() - .map(|key_info| match key_info { - Key(key) => KnownKey(key), - KeyRef(key_ref) => KnownKey(key_ref.to_vec()), - KeySize(key_info) => key_info, - }) - .collect::>(), - ); - - if !index.unique { - key_info_path.push(KnownKey(vec![0])); - - // here we should return an error if the element already exists - self.batch_delete_up_tree_while_empty( - key_info_path, - document.id.as_slice(), - Some(CONTRACT_DOCUMENTS_PATH_HEIGHT), - BatchDeleteUpTreeApplyType::StatefulBatchDelete { - is_known_to_be_subtree_with_sum: Some((false, false)), - }, - transaction, - previous_batch_operations, - &mut batch_operations, - )?; - } else { - // here we should return an error if the element already exists - self.batch_delete_up_tree_while_empty( - key_info_path, - &[0], - Some(CONTRACT_DOCUMENTS_PATH_HEIGHT), - BatchDeleteUpTreeApplyType::StatefulBatchDelete { - is_known_to_be_subtree_with_sum: Some((false, false)), - }, - transaction, - previous_batch_operations, - &mut batch_operations, - )?; - } + // here we are inserting an empty tree that will have a subtree of all other index properties - // unique indexes will be stored under key "0" - // non unique indices should have a tree at key "0" that has all elements based off of primary key - if !index.unique || all_fields_null { - // here we are inserting an empty tree that will have a subtree of all other index properties - self.batch_insert_empty_tree_if_not_exists( - PathKeyInfo::PathKeyRef::<0>((index_path.clone(), &[0])), - storage_flags.as_ref().map(|flags| flags.as_ref()), + let mut qualified_path = index_path.clone(); + 
qualified_path.push(document_index_field.clone()); + + if !batch_insertion_cache.contains(&qualified_path) { + let inserted = self.batch_insert_empty_tree_if_not_exists( + PathKeyInfo::PathKeyRef::<0>(( + index_path.clone(), + document_index_field.as_slice(), + )), + storage_flags, BatchInsertTreeApplyType::StatefulBatchInsertTree, transaction, previous_batch_operations, &mut batch_operations, )?; - index_path.push(vec![0]); - - // here we should return an error if the element already exists - self.batch_insert( - PathKeyRefElement::<0>(( - index_path, - document.id.as_slice(), - document_reference.clone(), - )), - &mut batch_operations, - )?; - } else { - // in one update you can't insert an element twice, so need to check the cache - // here we should return an error if the element already exists - let inserted = self.batch_insert_if_not_exists( - PathKeyRefElement::<0>((index_path, &[0], document_reference.clone())), - BatchInsertApplyType::StatefulBatchInsert, - transaction, - &mut batch_operations, - )?; - if !inserted { - return Err(Error::Drive(DriveError::CorruptedContractIndexes( - "index already exists", - ))); + if inserted { + batch_insertion_cache.insert(qualified_path); } } } + + all_fields_null &= document_index_field.is_empty(); + + // we push the actual value of the index path, both for the new and the old + index_path.push(document_index_field); + old_index_path.push(old_document_index_field); + // Iteration 1. the index path is now something like Contracts/ContractID/Documents(1)/$ownerId//toUserId// + // Iteration 2. 
the index path is now something like Contracts/ContractID/Documents(1)/$ownerId//toUserId//accountReference/ + } + + if change_occurred_on_index { + // we first need to delete the old values + // unique indexes will be stored under key "0" + // non unique indices should have a tree at key "0" that has all elements based off of primary key + + let mut key_info_path = KeyInfoPath::from_vec( + old_index_path + .into_iter() + .map(|key_info| match key_info { + Key(key) => KnownKey(key), + KeyRef(key_ref) => KnownKey(key_ref.to_vec()), + KeySize(key_info) => key_info, + }) + .collect::>(), + ); + + if !index.unique { + key_info_path.push(KnownKey(vec![0])); + + // here we should return an error if the element already exists + self.batch_delete_up_tree_while_empty( + key_info_path, + document.id.as_slice(), + Some(CONTRACT_DOCUMENTS_PATH_HEIGHT), + BatchDeleteUpTreeApplyType::StatefulBatchDelete { + is_known_to_be_subtree_with_sum: Some((false, false)), + }, + transaction, + previous_batch_operations, + &mut batch_operations, + )?; + } else { + // here we should return an error if the element already exists + self.batch_delete_up_tree_while_empty( + key_info_path, + &[0], + Some(CONTRACT_DOCUMENTS_PATH_HEIGHT), + BatchDeleteUpTreeApplyType::StatefulBatchDelete { + is_known_to_be_subtree_with_sum: Some((false, false)), + }, + transaction, + previous_batch_operations, + &mut batch_operations, + )?; + } + + // unique indexes will be stored under key "0" + // non unique indices should have a tree at key "0" that has all elements based off of primary key + if !index.unique || all_fields_null { + // here we are inserting an empty tree that will have a subtree of all other index properties + self.batch_insert_empty_tree_if_not_exists( + PathKeyInfo::PathKeyRef::<0>((index_path.clone(), &[0])), + storage_flags, + BatchInsertTreeApplyType::StatefulBatchInsertTree, + transaction, + previous_batch_operations, + &mut batch_operations, + )?; + index_path.push(vec![0]); + + // here we 
should return an error if the element already exists + self.batch_insert( + PathKeyRefElement::<0>(( + index_path, + document.id.as_slice(), + document_reference.clone(), + )), + &mut batch_operations, + )?; + } else { + // in one update you can't insert an element twice, so need to check the cache + // here we should return an error if the element already exists + let inserted = self.batch_insert_if_not_exists( + PathKeyRefElement::<0>((index_path, &[0], document_reference.clone())), + BatchInsertApplyType::StatefulBatchInsert, + transaction, + &mut batch_operations, + )?; + if !inserted { + return Err(Error::Drive(DriveError::CorruptedContractIndexes( + "index already exists", + ))); + } + } } } Ok(batch_operations) } + + /// Add update multiple documents operations + pub fn add_update_multiple_documents_operations<'a>( + &self, + documents: &'a [DocumentStub], + data_contract: &'a DataContract, + document_type: &'a DocumentType, + drive_operation_types: &mut Vec>, + ) { + let operations: Vec = documents + .iter() + .map(|document| { + DocumentOperation::UpdateOperation(UpdateOperationInfo { + document, + serialized_document: None, + owner_id: None, + storage_flags: None, + }) + }) + .collect(); + + if !operations.is_empty() { + drive_operation_types.push(DriveOperationType::DocumentOperation( + DocumentOperationType::MultipleDocumentOperationsForSameContractDocumentType { + document_operations: DocumentOperationsForContractDocumentType { + operations, + contract: data_contract, + document_type, + }, + }, + )); + } + } } #[cfg(test)] diff --git a/packages/rs-drive/src/drive/fee_pools/epochs/credit_distribution_pools.rs b/packages/rs-drive/src/drive/fee_pools/epochs/credit_distribution_pools.rs index daa8be714ab..be00cbbee8a 100644 --- a/packages/rs-drive/src/drive/fee_pools/epochs/credit_distribution_pools.rs +++ b/packages/rs-drive/src/drive/fee_pools/epochs/credit_distribution_pools.rs @@ -147,9 +147,9 @@ impl Drive { mod tests { use super::*; - use 
crate::common::helpers::setup::setup_drive_with_initial_state_structure; use crate::drive::batch::GroveDbOpBatch; use crate::fee_pools::epochs_root_tree_key_constants::KEY_STORAGE_FEE_POOL; + use crate::tests::helpers::setup::setup_drive_with_initial_state_structure; mod get_epoch_storage_credits_for_distribution { use super::*; diff --git a/packages/rs-drive/src/drive/fee_pools/epochs/mod.rs b/packages/rs-drive/src/drive/fee_pools/epochs/mod.rs index ff07c475330..4e49e71c0f2 100644 --- a/packages/rs-drive/src/drive/fee_pools/epochs/mod.rs +++ b/packages/rs-drive/src/drive/fee_pools/epochs/mod.rs @@ -59,7 +59,7 @@ impl Drive { mod tests { use super::*; - use crate::common::helpers::setup::setup_drive_with_initial_state_structure; + use crate::tests::helpers::setup::setup_drive_with_initial_state_structure; mod is_epoch_tree_exists { use super::*; diff --git a/packages/rs-drive/src/drive/fee_pools/epochs/proposers.rs b/packages/rs-drive/src/drive/fee_pools/epochs/proposers.rs index 7f4067ba385..68678520826 100644 --- a/packages/rs-drive/src/drive/fee_pools/epochs/proposers.rs +++ b/packages/rs-drive/src/drive/fee_pools/epochs/proposers.rs @@ -148,8 +148,8 @@ impl Drive { #[cfg(test)] mod tests { use super::*; - use crate::common::helpers::setup::setup_drive_with_initial_state_structure; use crate::drive::batch::GroveDbOpBatch; + use crate::tests::helpers::setup::setup_drive_with_initial_state_structure; mod get_epochs_proposer_block_count { use super::*; diff --git a/packages/rs-drive/src/drive/fee_pools/epochs/start_block.rs b/packages/rs-drive/src/drive/fee_pools/epochs/start_block.rs index 626baa4dc46..591661c32a3 100644 --- a/packages/rs-drive/src/drive/fee_pools/epochs/start_block.rs +++ b/packages/rs-drive/src/drive/fee_pools/epochs/start_block.rs @@ -144,7 +144,7 @@ impl Drive { #[cfg(test)] mod tests { use super::*; - use crate::common::helpers::setup::setup_drive_with_initial_state_structure; + use 
crate::tests::helpers::setup::setup_drive_with_initial_state_structure; mod get_epoch_start_block_height { use super::*; diff --git a/packages/rs-drive/src/drive/fee_pools/epochs/start_time.rs b/packages/rs-drive/src/drive/fee_pools/epochs/start_time.rs index 5db343933c0..4e0cbe1cd17 100644 --- a/packages/rs-drive/src/drive/fee_pools/epochs/start_time.rs +++ b/packages/rs-drive/src/drive/fee_pools/epochs/start_time.rs @@ -72,7 +72,7 @@ impl Drive { #[cfg(test)] mod tests { - use crate::common::helpers::setup::setup_drive_with_initial_state_structure; + use crate::tests::helpers::setup::setup_drive_with_initial_state_structure; use super::*; diff --git a/packages/rs-drive/src/drive/fee_pools/mod.rs b/packages/rs-drive/src/drive/fee_pools/mod.rs index 92924a8dc32..8a154790ce0 100644 --- a/packages/rs-drive/src/drive/fee_pools/mod.rs +++ b/packages/rs-drive/src/drive/fee_pools/mod.rs @@ -181,7 +181,7 @@ impl Drive { mod tests { use super::*; - use crate::common::helpers::setup::setup_drive_with_initial_state_structure; + use crate::tests::helpers::setup::setup_drive_with_initial_state_structure; mod add_update_epoch_storage_fee_pools_operations { use super::*; diff --git a/packages/rs-drive/src/drive/fee_pools/pending_epoch_refunds.rs b/packages/rs-drive/src/drive/fee_pools/pending_epoch_refunds.rs index f2b90110c30..5376da02281 100644 --- a/packages/rs-drive/src/drive/fee_pools/pending_epoch_refunds.rs +++ b/packages/rs-drive/src/drive/fee_pools/pending_epoch_refunds.rs @@ -219,7 +219,7 @@ pub fn add_update_pending_epoch_refunds_operations( #[cfg(test)] mod tests { use super::*; - use crate::common::helpers::setup::setup_drive_with_initial_state_structure; + use crate::tests::helpers::setup::setup_drive_with_initial_state_structure; mod fetch_and_add_pending_epoch_refunds_to_collection { use super::*; diff --git a/packages/rs-drive/src/drive/fee_pools/storage_fee_distribution_pool.rs b/packages/rs-drive/src/drive/fee_pools/storage_fee_distribution_pool.rs index 
de98f680bb5..59639002632 100644 --- a/packages/rs-drive/src/drive/fee_pools/storage_fee_distribution_pool.rs +++ b/packages/rs-drive/src/drive/fee_pools/storage_fee_distribution_pool.rs @@ -64,7 +64,7 @@ impl Drive { mod tests { use super::*; - use crate::common::helpers::setup::{setup_drive, setup_drive_with_initial_state_structure}; + use crate::tests::helpers::setup::{setup_drive, setup_drive_with_initial_state_structure}; mod get_storage_fees_from_distribution_pool { use super::*; diff --git a/packages/rs-drive/src/drive/fee_pools/unpaid_epoch.rs b/packages/rs-drive/src/drive/fee_pools/unpaid_epoch.rs index c298ea7b448..edd867df892 100644 --- a/packages/rs-drive/src/drive/fee_pools/unpaid_epoch.rs +++ b/packages/rs-drive/src/drive/fee_pools/unpaid_epoch.rs @@ -68,7 +68,7 @@ impl Drive { mod tests { use super::*; - use crate::common::helpers::setup::{setup_drive, setup_drive_with_initial_state_structure}; + use crate::tests::helpers::setup::{setup_drive, setup_drive_with_initial_state_structure}; mod get_unpaid_epoch_index { use super::*; diff --git a/packages/rs-drive/src/drive/genesis_time.rs b/packages/rs-drive/src/drive/genesis_time.rs index 8dc0488bbca..ca9f41ba713 100644 --- a/packages/rs-drive/src/drive/genesis_time.rs +++ b/packages/rs-drive/src/drive/genesis_time.rs @@ -77,7 +77,7 @@ impl Drive { mod tests { use super::*; - use crate::common::helpers::setup::{setup_drive, setup_drive_with_initial_state_structure}; + use crate::tests::helpers::setup::{setup_drive, setup_drive_with_initial_state_structure}; mod get_genesis_time { use super::*; diff --git a/packages/rs-drive/src/drive/identity/balance/prove.rs b/packages/rs-drive/src/drive/identity/balance/prove.rs index 9450a5b1900..abc394d7206 100644 --- a/packages/rs-drive/src/drive/identity/balance/prove.rs +++ b/packages/rs-drive/src/drive/identity/balance/prove.rs @@ -28,11 +28,12 @@ impl Drive { #[cfg(test)] mod tests { use super::*; - use 
crate::common::helpers::setup::setup_drive_with_initial_state_structure; use crate::drive::block_info::BlockInfo; + use crate::tests::helpers::setup::setup_drive_with_initial_state_structure; use dpp::identity::Identity; mod prove_identity_balance { + use super::*; #[test] diff --git a/packages/rs-drive/src/drive/identity/balance/update.rs b/packages/rs-drive/src/drive/identity/balance/update.rs index fab2b9794f8..b1f98fd5e9a 100644 --- a/packages/rs-drive/src/drive/identity/balance/update.rs +++ b/packages/rs-drive/src/drive/identity/balance/update.rs @@ -471,13 +471,15 @@ mod tests { use super::*; use dpp::prelude::*; - use crate::common::helpers::setup::setup_drive_with_initial_state_structure; - use crate::fee_pools::epochs::Epoch; + use crate::{ + common::helpers::identities::create_test_identity, + tests::helpers::setup::setup_drive_with_initial_state_structure, + }; + mod add_to_identity_balance { use super::*; - use crate::common::helpers::identities::create_test_identity; #[test] fn should_add_to_balance() { diff --git a/packages/rs-drive/src/drive/identity/fetch/fetch_by_public_key_hashes.rs b/packages/rs-drive/src/drive/identity/fetch/fetch_by_public_key_hashes.rs index f4c2b720825..33560d152a3 100644 --- a/packages/rs-drive/src/drive/identity/fetch/fetch_by_public_key_hashes.rs +++ b/packages/rs-drive/src/drive/identity/fetch/fetch_by_public_key_hashes.rs @@ -278,8 +278,8 @@ impl Drive { #[cfg(test)] mod tests { - use crate::common::helpers::setup::setup_drive; use crate::drive::block_info::BlockInfo; + use crate::tests::helpers::setup::setup_drive; use super::*; diff --git a/packages/rs-drive/src/drive/identity/fetch/full_identity.rs b/packages/rs-drive/src/drive/identity/fetch/full_identity.rs index 249ae620099..a6c42ddc93a 100644 --- a/packages/rs-drive/src/drive/identity/fetch/full_identity.rs +++ b/packages/rs-drive/src/drive/identity/fetch/full_identity.rs @@ -152,7 +152,7 @@ impl Drive { #[cfg(test)] mod tests { use super::*; - use 
crate::common::helpers::setup::setup_drive_with_initial_state_structure; + use crate::tests::helpers::setup::setup_drive_with_initial_state_structure; mod fetch_full_identities { use super::*; diff --git a/packages/rs-drive/src/drive/identity/fetch/prove/full_identities_by_public_key_hashes.rs b/packages/rs-drive/src/drive/identity/fetch/prove/full_identities_by_public_key_hashes.rs index c6d54d02c0a..1db2ee31859 100644 --- a/packages/rs-drive/src/drive/identity/fetch/prove/full_identities_by_public_key_hashes.rs +++ b/packages/rs-drive/src/drive/identity/fetch/prove/full_identities_by_public_key_hashes.rs @@ -56,8 +56,8 @@ impl Drive { #[cfg(test)] mod tests { use super::*; - use crate::common::helpers::setup::setup_drive_with_initial_state_structure; use crate::drive::block_info::BlockInfo; + use crate::tests::helpers::setup::setup_drive_with_initial_state_structure; use dpp::identity::Identity; use std::collections::BTreeMap; diff --git a/packages/rs-drive/src/drive/identity/fetch/prove/full_identity.rs b/packages/rs-drive/src/drive/identity/fetch/prove/full_identity.rs index fd3443bd357..48a386aa7be 100644 --- a/packages/rs-drive/src/drive/identity/fetch/prove/full_identity.rs +++ b/packages/rs-drive/src/drive/identity/fetch/prove/full_identity.rs @@ -30,8 +30,8 @@ impl Drive { #[cfg(test)] mod tests { use super::*; - use crate::common::helpers::setup::setup_drive_with_initial_state_structure; use crate::drive::block_info::BlockInfo; + use crate::tests::helpers::setup::setup_drive_with_initial_state_structure; use dpp::identity::Identity; use grovedb::query_result_type::QueryResultType; diff --git a/packages/rs-drive/src/drive/identity/fetch/prove/identity_ids_by_public_key_hashes.rs b/packages/rs-drive/src/drive/identity/fetch/prove/identity_ids_by_public_key_hashes.rs index d38600ec9be..83dbed107ca 100644 --- a/packages/rs-drive/src/drive/identity/fetch/prove/identity_ids_by_public_key_hashes.rs +++ 
b/packages/rs-drive/src/drive/identity/fetch/prove/identity_ids_by_public_key_hashes.rs @@ -29,8 +29,8 @@ impl Drive { #[cfg(test)] mod tests { use super::*; - use crate::common::helpers::setup::setup_drive_with_initial_state_structure; use crate::drive::block_info::BlockInfo; + use crate::tests::helpers::setup::setup_drive_with_initial_state_structure; use dpp::identity::Identity; use std::collections::BTreeMap; diff --git a/packages/rs-drive/src/drive/identity/insert.rs b/packages/rs-drive/src/drive/identity/insert.rs index 2bdd23db0d0..4162b1be9b2 100644 --- a/packages/rs-drive/src/drive/identity/insert.rs +++ b/packages/rs-drive/src/drive/identity/insert.rs @@ -161,8 +161,7 @@ impl Drive { #[cfg(test)] mod tests { - use crate::common::helpers::setup::setup_drive; - use crate::drive::block_info::BlockInfo; + use crate::{drive::block_info::BlockInfo, tests::helpers::setup::setup_drive}; use dpp::identity::Identity; use tempfile::TempDir; diff --git a/packages/rs-drive/src/drive/identity/key/fetch.rs b/packages/rs-drive/src/drive/identity/key/fetch.rs index 96fbfa5a905..c2b07b5d425 100644 --- a/packages/rs-drive/src/drive/identity/key/fetch.rs +++ b/packages/rs-drive/src/drive/identity/key/fetch.rs @@ -504,8 +504,8 @@ impl Drive { #[cfg(test)] mod tests { - use crate::common::helpers::setup::setup_drive; use crate::drive::block_info::BlockInfo; + use crate::tests::helpers::setup::setup_drive; use dpp::identity::Identity; use super::*; diff --git a/packages/rs-drive/src/drive/identity/key/insert.rs b/packages/rs-drive/src/drive/identity/key/insert.rs index bcb973b6022..0d6a75e441b 100644 --- a/packages/rs-drive/src/drive/identity/key/insert.rs +++ b/packages/rs-drive/src/drive/identity/key/insert.rs @@ -22,7 +22,6 @@ use grovedb::reference_path::ReferencePathType; use grovedb::{Element, EstimatedLayerInformation, TransactionArg}; use integer_encoding::VarInt; -use serde::Serialize; use std::collections::HashMap; pub enum ContractApplyInfo { diff --git 
a/packages/rs-drive/src/drive/identity/mod.rs b/packages/rs-drive/src/drive/identity/mod.rs index 5a3ea1beea1..06f11a3389c 100644 --- a/packages/rs-drive/src/drive/identity/mod.rs +++ b/packages/rs-drive/src/drive/identity/mod.rs @@ -37,6 +37,9 @@ use crate::drive::RootTree; use dpp::identity::{KeyID, Purpose, SecurityLevel}; +/// Everything related to withdrawals +pub mod withdrawals; + use dpp::identity::Purpose::AUTHENTICATION; use integer_encoding::VarInt; @@ -47,9 +50,8 @@ mod fetch; mod insert; mod key; mod update; -mod withdrawal_queue; -pub use withdrawal_queue::add_initial_withdrawal_state_structure_operations; +pub use withdrawals::paths::add_initial_withdrawal_state_structure_operations; pub(crate) const IDENTITY_KEY: [u8; 1] = [0]; diff --git a/packages/rs-drive/src/drive/identity/update.rs b/packages/rs-drive/src/drive/identity/update.rs index 5ef95937c8c..b24fd9995f6 100644 --- a/packages/rs-drive/src/drive/identity/update.rs +++ b/packages/rs-drive/src/drive/identity/update.rs @@ -270,7 +270,7 @@ mod tests { use super::*; use dpp::prelude::*; - use crate::common::helpers::setup::setup_drive_with_initial_state_structure; + use crate::tests::helpers::setup::setup_drive_with_initial_state_structure; mod add_new_keys_to_identity { use super::*; diff --git a/packages/rs-drive/src/drive/identity/withdrawal_queue.rs b/packages/rs-drive/src/drive/identity/withdrawal_queue.rs deleted file mode 100644 index 3f34963070c..00000000000 --- a/packages/rs-drive/src/drive/identity/withdrawal_queue.rs +++ /dev/null @@ -1,303 +0,0 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons 
to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. -// - -//! This module defines functions within the Drive struct related to withdrawal transaction (AssetUnlock) -//! - -use std::ops::RangeFull; - -use grovedb::query_result_type::QueryResultType::QueryKeyElementPairResultType; -use grovedb::{Element, PathQuery, Query, QueryItem, SizedQuery, TransactionArg}; - -use crate::drive::batch::GroveDbOpBatch; -use crate::drive::grove_operations::BatchDeleteApplyType; -use crate::drive::{Drive, RootTree}; -use crate::error::drive::DriveError; -use crate::error::Error; -use crate::fee::op::DriveOperation; - -/// constant id for transaction counter -pub const WITHDRAWAL_TRANSACTIONS_COUNTER_ID: [u8; 1] = [0]; -/// constant id for subtree containing transactions queue -pub const WITHDRAWAL_TRANSACTIONS_QUEUE_ID: [u8; 1] = [1]; - -type WithdrawalTransaction = (Vec, Vec); - -/// Add operations for creating initial withdrawal state structure -pub fn add_initial_withdrawal_state_structure_operations(batch: &mut GroveDbOpBatch) { - batch.add_insert( - vec![vec![RootTree::WithdrawalTransactions as u8]], - WITHDRAWAL_TRANSACTIONS_COUNTER_ID.to_vec(), - Element::Item(0u64.to_be_bytes().to_vec(), None), - ); - - batch.add_insert_empty_tree( - vec![vec![RootTree::WithdrawalTransactions as u8]], - 
WITHDRAWAL_TRANSACTIONS_QUEUE_ID.to_vec(), - ); -} - -impl Drive { - /// Get latest withdrawal index in a queue - pub fn fetch_latest_withdrawal_transaction_index( - &self, - transaction: TransactionArg, - ) -> Result { - let result = self - .grove - .get( - [Into::<&[u8; 1]>::into(RootTree::WithdrawalTransactions).as_slice()], - &WITHDRAWAL_TRANSACTIONS_COUNTER_ID, - transaction, - ) - .unwrap() - .map_err(Error::GroveDB); - - if let Err(Error::GroveDB(grovedb::Error::PathKeyNotFound(_))) = &result { - return Ok(0); - } - - let element = result?; - - if let Element::Item(counter_bytes, _) = element { - let counter = u64::from_be_bytes(counter_bytes.try_into().map_err(|_| { - DriveError::CorruptedWithdrawalTransactionsCounterInvalidLength( - "withdrawal transactions counter must be an u64", - ) - })?); - - Ok(counter) - } else { - Err(Error::Drive( - DriveError::CorruptedWithdrawalTransactionsCounterNotItem( - "withdrawal transactions counter must be an item", - ), - )) - } - } - - /// Add counter update operations to the batch - pub fn add_update_withdrawal_index_counter_operation( - &self, - batch: &mut GroveDbOpBatch, - value: Vec, - ) { - batch.add_insert( - vec![vec![RootTree::WithdrawalTransactions as u8]], - WITHDRAWAL_TRANSACTIONS_COUNTER_ID.to_vec(), - Element::Item(value, None), - ); - } - - /// Add insert operations for withdrawal transactions to the batch - pub fn add_enqueue_withdrawal_transaction_operations( - &self, - batch: &mut GroveDbOpBatch, - withdrawals: Vec<(Vec, Vec)>, - ) { - for (id, bytes) in withdrawals { - batch.add_insert( - vec![ - vec![RootTree::WithdrawalTransactions as u8], - WITHDRAWAL_TRANSACTIONS_QUEUE_ID.to_vec(), - ], - id, - Element::Item(bytes, None), - ); - } - } - - /// Get specified amount of withdrawal transactions from the DB - pub fn dequeue_withdrawal_transactions( - &self, - num_of_transactions: u16, - transaction: TransactionArg, - ) -> Result, Error> { - let mut query = Query::new(); - - 
query.insert_item(QueryItem::RangeFull(RangeFull)); - - let path_query = PathQuery { - path: vec![ - vec![RootTree::WithdrawalTransactions as u8], - WITHDRAWAL_TRANSACTIONS_QUEUE_ID.to_vec(), - ], - query: SizedQuery { - query, - limit: Some(num_of_transactions), - offset: None, - }, - }; - - let result_items = self - .grove - .query_raw( - &path_query, - transaction.is_some(), - QueryKeyElementPairResultType, - transaction, - ) - .unwrap() - .map_err(Error::GroveDB)? - .0 - .to_key_elements(); - - let withdrawals = result_items - .into_iter() - .map(|(id, element)| match element { - Element::Item(bytes, _) => Ok((id, bytes)), - _ => Err(Error::Drive(DriveError::CorruptedWithdrawalNotItem( - "withdrawal is not an item", - ))), - }) - .collect::, Vec)>, Error>>()?; - - if !withdrawals.is_empty() { - let mut batch_operations: Vec = vec![]; - let mut drive_operations: Vec = vec![]; - - let withdrawals_path: [&[u8]; 2] = [ - Into::<&[u8; 1]>::into(RootTree::WithdrawalTransactions), - &WITHDRAWAL_TRANSACTIONS_QUEUE_ID, - ]; - - for (id, _) in withdrawals.iter() { - self.batch_delete( - withdrawals_path, - id, - // we know that we are not deleting a subtree - BatchDeleteApplyType::StatefulBatchDelete { - is_known_to_be_subtree_with_sum: Some((false, false)), - }, - transaction, - &mut batch_operations, - )?; - } - - self.apply_batch_drive_operations( - None, - transaction, - batch_operations, - &mut drive_operations, - )?; - } - - Ok(withdrawals) - } -} - -#[cfg(test)] -mod tests { - use crate::common::helpers::setup::setup_drive_with_initial_state_structure; - use crate::drive::batch::GroveDbOpBatch; - - mod queue { - use super::*; - - #[test] - fn test_enqueue_and_dequeue() { - let drive = setup_drive_with_initial_state_structure(); - - let transaction = drive.grove.start_transaction(); - - let withdrawals: Vec<(Vec, Vec)> = (0..17) - .map(|i: u8| (i.to_be_bytes().to_vec(), vec![i; 32])) - .collect(); - - let mut batch = GroveDbOpBatch::new(); - - 
drive.add_enqueue_withdrawal_transaction_operations(&mut batch, withdrawals); - - drive - .grove_apply_batch(batch, true, Some(&transaction)) - .expect("to apply ops"); - - let withdrawals = drive - .dequeue_withdrawal_transactions(16, Some(&transaction)) - .expect("to dequeue withdrawals"); - - assert_eq!(withdrawals.len(), 16); - - let withdrawals = drive - .dequeue_withdrawal_transactions(16, Some(&transaction)) - .expect("to dequeue withdrawals"); - - assert_eq!(withdrawals.len(), 1); - - let withdrawals = drive - .dequeue_withdrawal_transactions(16, Some(&transaction)) - .expect("to dequeue withdrawals"); - - assert_eq!(withdrawals.len(), 0); - } - } - - mod index { - use super::*; - - #[test] - fn test_withdrawal_transaction_counter() { - let drive = setup_drive_with_initial_state_structure(); - - let transaction = drive.grove.start_transaction(); - - let mut batch = GroveDbOpBatch::new(); - - let counter: u64 = 42; - - drive.add_update_withdrawal_index_counter_operation( - &mut batch, - counter.to_be_bytes().to_vec(), - ); - - drive - .grove_apply_batch(batch, false, Some(&transaction)) - .expect("to apply ops"); - - let stored_counter = drive - .fetch_latest_withdrawal_transaction_index(Some(&transaction)) - .expect("to withdraw counter"); - - assert_eq!(stored_counter, counter); - } - - #[test] - fn test_returns_0_if_empty() { - let drive = setup_drive_with_initial_state_structure(); - - let transaction = drive.grove.start_transaction(); - - let stored_counter = drive - .fetch_latest_withdrawal_transaction_index(Some(&transaction)) - .expect("to withdraw counter"); - - assert_eq!(stored_counter, 0); - } - } -} diff --git a/packages/rs-drive/src/drive/identity/withdrawals/documents.rs b/packages/rs-drive/src/drive/identity/withdrawals/documents.rs new file mode 100644 index 00000000000..85ac07c88ef --- /dev/null +++ b/packages/rs-drive/src/drive/identity/withdrawals/documents.rs @@ -0,0 +1,352 @@ +use std::collections::BTreeMap; + +use 
dpp::data_contract::document_type::random_document::CreateRandomDocument; +use dpp::{ + contracts::withdrawals_contract, data_contract::DriveContractExt, + document::document_stub::DocumentStub, +}; +use grovedb::TransactionArg; +use indexmap::IndexMap; +use lazy_static::__Deref; + +use crate::{ + drive::{query::QueryDocumentsOutcome, Drive}, + error::{drive::DriveError, Error}, + query::{DriveQuery, InternalClauses, OrderClause, WhereClause}, +}; + +impl Drive { + /// Fetch withdrawal documents by it's status + pub fn fetch_withdrawal_documents_by_status( + &self, + status: u8, + transaction: TransactionArg, + ) -> Result, Error> { + let data_contract_id = withdrawals_contract::CONTRACT_ID.deref(); + + let contract_fetch_info = self + .get_contract_with_fetch_info(data_contract_id.to_buffer(), None, transaction)? + .1 + .ok_or_else(|| { + Error::Drive(DriveError::CorruptedCodeExecution( + "Can't fetch data contract", + )) + })?; + + let document_type = contract_fetch_info + .contract + .document_type_for_name(withdrawals_contract::document_types::WITHDRAWAL)?; + + let mut where_clauses = BTreeMap::new(); + + //todo: make this lazy loaded or const + where_clauses.insert( + withdrawals_contract::property_names::STATUS.to_string(), + WhereClause { + field: withdrawals_contract::property_names::STATUS.to_string(), + operator: crate::query::WhereOperator::Equal, + value: ciborium::Value::Integer(status.into()), + }, + ); + + let mut order_by = IndexMap::new(); + + order_by.insert( + withdrawals_contract::property_names::UPDATED_AT.to_string(), + OrderClause { + field: withdrawals_contract::property_names::UPDATED_AT.to_string(), + ascending: true, + }, + ); + + let drive_query = DriveQuery { + contract: &contract_fetch_info.contract, + document_type, + internal_clauses: InternalClauses { + primary_key_in_clause: None, + primary_key_equal_clause: None, + in_clause: None, + range_clause: None, + equal_clauses: where_clauses, + }, + offset: 0, + limit: 100, + order_by, + 
start_at: None, + start_at_included: false, + block_time: None, + }; + + let QueryDocumentsOutcome { + items, + skipped: _, + cost: _, + } = self.query_documents(drive_query, None, transaction)?; + + let documents = items + .iter() + .map(|document_cbor| { + document_type + .document_from_bytes(document_cbor) + .map_err(|e| { + Error::Drive(DriveError::CorruptedDriveState(format!( + "can't create document from bytes : {e}" + ))) + }) + }) + .collect::, Error>>()?; + + Ok(documents) + } + + /// Find one document by it's transactionId field + pub fn find_withdrawal_document_by_transaction_id( + &self, + original_transaction_id: &[u8], + transaction: TransactionArg, + ) -> Result { + let data_contract_id = withdrawals_contract::CONTRACT_ID.deref(); + + let contract_fetch_info = self + .get_contract_with_fetch_info(data_contract_id.to_buffer(), None, transaction)? + .1 + .ok_or_else(|| { + Error::Drive(DriveError::CorruptedCodeExecution( + "Can't fetch data contract", + )) + })?; + + let document_type = contract_fetch_info + .contract + .document_type_for_name(withdrawals_contract::document_types::WITHDRAWAL)?; + + let mut where_clauses = BTreeMap::new(); + + where_clauses.insert( + withdrawals_contract::property_names::TRANSACTION_ID.to_string(), + WhereClause { + field: withdrawals_contract::property_names::TRANSACTION_ID.to_string(), + operator: crate::query::WhereOperator::Equal, + value: ciborium::Value::Bytes(original_transaction_id.to_vec()), + }, + ); + + where_clauses.insert( + withdrawals_contract::property_names::STATUS.to_string(), + WhereClause { + field: withdrawals_contract::property_names::STATUS.to_string(), + operator: crate::query::WhereOperator::Equal, + value: ciborium::Value::Integer( + (withdrawals_contract::WithdrawalStatus::POOLED as u8).into(), + ), + }, + ); + + let drive_query = DriveQuery { + contract: &contract_fetch_info.contract, + document_type, + internal_clauses: InternalClauses { + primary_key_in_clause: None, + 
primary_key_equal_clause: None, + in_clause: None, + range_clause: None, + equal_clauses: where_clauses, + }, + offset: 0, + limit: 100, + order_by: IndexMap::new(), + start_at: None, + start_at_included: false, + block_time: None, + }; + + let QueryDocumentsOutcome { + items, + skipped: _, + cost: _, + } = self.query_documents(drive_query, None, transaction)?; + + let documents = items + .iter() + .map(|document_cbor| { + DocumentStub::from_bytes(document_cbor, document_type).map_err(|_| { + Error::Drive(DriveError::CorruptedDriveState( + "can't create document from bytes".to_string(), + )) + }) + }) + .collect::, Error>>()?; + + let document = documents + .get(0) + .ok_or(Error::Drive(DriveError::CorruptedCodeExecution( + "document was not found by transactionId", + )))? + .clone(); + + Ok(document) + } +} + +#[cfg(test)] +mod tests { + use dpp::contracts::withdrawals_contract; + use dpp::prelude::Identifier; + use dpp::tests::fixtures::get_withdrawal_document_fixture; + use serde_json::json; + + use crate::tests::helpers::setup::setup_drive_with_initial_state_structure; + use crate::tests::helpers::setup::{setup_document, setup_system_data_contract}; + + mod fetch_withdrawal_documents_by_status { + use dpp::data_contract::DriveContractExt; + use dpp::identity::state_transition::identity_credit_withdrawal_transition::Pooling; + use dpp::system_data_contracts::{load_system_data_contract, SystemDataContract}; + + use super::*; + + #[test] + fn test_return_list_of_documents() { + let drive = setup_drive_with_initial_state_structure(); + + let transaction = drive.grove.start_transaction(); + + let data_contract = load_system_data_contract(SystemDataContract::Withdrawals) + .expect("to load system data contract"); + + setup_system_data_contract(&drive, &data_contract, Some(&transaction)); + + let documents = drive + .fetch_withdrawal_documents_by_status( + withdrawals_contract::WithdrawalStatus::QUEUED.into(), + Some(&transaction), + ) + .expect("to fetch documents by 
status"); + + assert_eq!(documents.len(), 0); + + let owner_id = Identifier::new([1u8; 32]); + + let document = get_withdrawal_document_fixture( + &data_contract, + owner_id, + json!({ + "amount": 1000, + "coreFeePerByte": 1, + "pooling": Pooling::Never, + "outputScript": (0..23).collect::>(), + "status": withdrawals_contract::WithdrawalStatus::QUEUED, + "transactionIndex": 1, + }), + ); + + let document_type = data_contract + .document_type_for_name(withdrawals_contract::document_types::WITHDRAWAL) + .expect("expected to get document type"); + + setup_document( + &drive, + &document, + &data_contract, + document_type, + Some(&transaction), + ); + + let document = get_withdrawal_document_fixture( + &data_contract, + owner_id, + json!({ + "amount": 1000, + "coreFeePerByte": 1, + "pooling": Pooling::Never, + "outputScript": (0..23).collect::>(), + "status": withdrawals_contract::WithdrawalStatus::POOLED, + "transactionIndex": 2, + }), + ); + + setup_document( + &drive, + &document, + &data_contract, + document_type, + Some(&transaction), + ); + + let documents = drive + .fetch_withdrawal_documents_by_status( + withdrawals_contract::WithdrawalStatus::QUEUED.into(), + Some(&transaction), + ) + .expect("to fetch documents by status"); + + assert_eq!(documents.len(), 1); + + let documents = drive + .fetch_withdrawal_documents_by_status( + withdrawals_contract::WithdrawalStatus::POOLED.into(), + Some(&transaction), + ) + .expect("to fetch documents by status"); + + assert_eq!(documents.len(), 1); + } + } + + mod find_document_by_transaction_id { + use dpp::data_contract::DriveContractExt; + use dpp::identity::state_transition::identity_credit_withdrawal_transition::Pooling; + use dpp::system_data_contracts::{load_system_data_contract, SystemDataContract}; + + use super::*; + + #[test] + fn test_find_document_by_transaction_id() { + let drive = setup_drive_with_initial_state_structure(); + + let transaction = drive.grove.start_transaction(); + + let data_contract = 
load_system_data_contract(SystemDataContract::Withdrawals) + .expect("to load system data contract"); + + setup_system_data_contract(&drive, &data_contract, Some(&transaction)); + + let owner_id = Identifier::new([1u8; 32]); + + let document = get_withdrawal_document_fixture( + &data_contract, + owner_id, + json!({ + "amount": 1000, + "coreFeePerByte": 1, + "pooling": Pooling::Never, + "outputScript": (0..23).collect::>(), + "status": withdrawals_contract::WithdrawalStatus::POOLED, + "transactionIndex": 1, + "transactionId": (0..32).collect::>(), + }), + ); + + let document_type = data_contract + .document_type_for_name(withdrawals_contract::document_types::WITHDRAWAL) + .expect("expected to get document type"); + + setup_document( + &drive, + &document, + &data_contract, + document_type, + Some(&transaction), + ); + + let found_document = drive + .find_withdrawal_document_by_transaction_id( + &(0..32).collect::>(), + Some(&transaction), + ) + .expect("to find document by it's transaction id"); + + assert_eq!(found_document.id.to_vec(), document.id.to_vec()); + } + } +} diff --git a/packages/rs-drive/src/drive/identity/withdrawals/mod.rs b/packages/rs-drive/src/drive/identity/withdrawals/mod.rs new file mode 100644 index 00000000000..f5285349513 --- /dev/null +++ b/packages/rs-drive/src/drive/identity/withdrawals/mod.rs @@ -0,0 +1,11 @@ +/// Functions related to updating of a withdrawal status +pub mod documents; +/// Functions and constants related to GroveDB paths +pub mod paths; +/// Functions related to withdrawal queue +pub mod queue; +/// Functions related to transaction index counter +pub mod transaction_index; + +/// Simple type alias for withdrawal transaction with it's id +pub type WithdrawalTransactionIdAndBytes = (Vec, Vec); diff --git a/packages/rs-drive/src/drive/identity/withdrawals/paths.rs b/packages/rs-drive/src/drive/identity/withdrawals/paths.rs new file mode 100644 index 00000000000..6dc3a53a07c --- /dev/null +++ 
b/packages/rs-drive/src/drive/identity/withdrawals/paths.rs @@ -0,0 +1,71 @@ +use grovedb::Element; + +use crate::drive::{batch::GroveDbOpBatch, RootTree}; + +/// constant id for transaction counter +pub const WITHDRAWAL_TRANSACTIONS_COUNTER_ID: [u8; 1] = [0]; +/// constant id for subtree containing transactions queue +pub const WITHDRAWAL_TRANSACTIONS_QUEUE_ID: [u8; 1] = [1]; +/// constant id for subtree containing expired transaction ids +pub const WITHDRAWAL_TRANSACTIONS_EXPIRED_IDS: [u8; 1] = [2]; + +/// Add operations for creating initial withdrawal state structure +pub fn add_initial_withdrawal_state_structure_operations(batch: &mut GroveDbOpBatch) { + batch.add_insert( + vec![vec![RootTree::WithdrawalTransactions as u8]], + WITHDRAWAL_TRANSACTIONS_COUNTER_ID.to_vec(), + Element::Item(0u64.to_be_bytes().to_vec(), None), + ); + + batch.add_insert_empty_tree( + vec![vec![RootTree::WithdrawalTransactions as u8]], + WITHDRAWAL_TRANSACTIONS_QUEUE_ID.to_vec(), + ); + + batch.add_insert_empty_tree( + vec![vec![RootTree::WithdrawalTransactions as u8]], + WITHDRAWAL_TRANSACTIONS_EXPIRED_IDS.to_vec(), + ); +} + +/// Helper function to get root path +pub fn get_withdrawal_root_path_vec() -> Vec> { + vec![vec![RootTree::WithdrawalTransactions as u8]] +} + +/// Helper function to get root path as u8 +pub fn get_withdrawal_root_path() -> [&'static [u8]; 1] { + [Into::<&[u8; 1]>::into(RootTree::WithdrawalTransactions)] +} + +/// Helper function to get queue path as Vec +pub fn get_withdrawal_transactions_queue_path_vec() -> Vec> { + vec![ + vec![RootTree::WithdrawalTransactions as u8], + WITHDRAWAL_TRANSACTIONS_QUEUE_ID.to_vec(), + ] +} + +/// Helper function to get queue path as [u8] +pub fn get_withdrawal_transactions_queue_path() -> [&'static [u8]; 2] { + [ + Into::<&[u8; 1]>::into(RootTree::WithdrawalTransactions), + &WITHDRAWAL_TRANSACTIONS_QUEUE_ID, + ] +} + +/// Helper function to get expired ids path as Vec +pub fn get_withdrawal_transactions_expired_ids_path_vec() 
-> Vec> { + vec![ + vec![RootTree::WithdrawalTransactions as u8], + WITHDRAWAL_TRANSACTIONS_EXPIRED_IDS.to_vec(), + ] +} + +/// Helper function to get expired ids path as [u8] +pub fn get_withdrawal_transactions_expired_ids_path() -> [&'static [u8]; 2] { + [ + Into::<&[u8; 1]>::into(RootTree::WithdrawalTransactions), + &WITHDRAWAL_TRANSACTIONS_EXPIRED_IDS, + ] +} diff --git a/packages/rs-drive/src/drive/identity/withdrawals/queue.rs b/packages/rs-drive/src/drive/identity/withdrawals/queue.rs new file mode 100644 index 00000000000..e314d8ddff4 --- /dev/null +++ b/packages/rs-drive/src/drive/identity/withdrawals/queue.rs @@ -0,0 +1,154 @@ +use std::ops::RangeFull; + +use grovedb::{ + query_result_type::QueryResultType, Element, PathQuery, Query, QueryItem, SizedQuery, + TransactionArg, +}; + +use crate::drive::identity::withdrawals::WithdrawalTransactionIdAndBytes; +use crate::{ + drive::{ + batch::{drive_op_batch::WithdrawalOperationType, DriveOperationType}, + Drive, + }, + error::{drive::DriveError, Error}, +}; + +use super::paths::get_withdrawal_transactions_queue_path_vec; + +impl Drive { + /// Add insert operations for withdrawal transactions to the batch + pub fn add_enqueue_withdrawal_transaction_operations<'a>( + &self, + withdrawals: &'a [WithdrawalTransactionIdAndBytes], + drive_operation_types: &mut Vec>, + ) { + if !withdrawals.is_empty() { + drive_operation_types.push(DriveOperationType::WithdrawalOperation( + WithdrawalOperationType::InsertTransactions { + withdrawal_transactions: withdrawals, + }, + )); + } + } + + /// Get specified amount of withdrawal transactions from the DB + pub fn dequeue_withdrawal_transactions( + &self, + max_amount: u16, + transaction: TransactionArg, + drive_operation_types: &mut Vec, + ) -> Result, Error> { + let mut query = Query::new(); + + query.insert_item(QueryItem::RangeFull(RangeFull)); + + let path_query = PathQuery { + path: get_withdrawal_transactions_queue_path_vec(), + query: SizedQuery { + query, + limit: 
Some(max_amount), + offset: None, + }, + }; + + let result_items = self + .grove + .query_raw( + &path_query, + transaction.is_some(), + QueryResultType::QueryKeyElementPairResultType, + transaction, + ) + .unwrap() + .map_err(Error::GroveDB)? + .0 + .to_key_elements(); + + let withdrawals = result_items + .into_iter() + .map(|(id, element)| match element { + Element::Item(bytes, _) => Ok((id, bytes)), + _ => Err(Error::Drive(DriveError::CorruptedWithdrawalNotItem( + "withdrawal is not an item", + ))), + }) + .collect::, Vec)>, Error>>()?; + + if !withdrawals.is_empty() { + for (id, _) in withdrawals.iter() { + drive_operation_types.push(DriveOperationType::WithdrawalOperation( + WithdrawalOperationType::DeleteWithdrawalTransaction { id: id.clone() }, + )); + } + } + + Ok(withdrawals) + } +} + +#[cfg(test)] +mod tests { + use crate::{ + drive::{batch::DriveOperationType, block_info::BlockInfo}, + fee_pools::epochs::Epoch, + tests::helpers::setup::setup_drive_with_initial_state_structure, + }; + + #[test] + fn test_enqueue_and_dequeue() { + let drive = setup_drive_with_initial_state_structure(); + + let transaction = drive.grove.start_transaction(); + + let withdrawals: Vec<(Vec, Vec)> = (0..17) + .map(|i: u8| (i.to_be_bytes().to_vec(), vec![i; 32])) + .collect(); + + let block_info = BlockInfo { + time_ms: 1, + height: 1, + epoch: Epoch::new(1), + }; + + let mut drive_operations: Vec = vec![]; + + drive.add_enqueue_withdrawal_transaction_operations(&withdrawals, &mut drive_operations); + + drive + .apply_drive_operations(drive_operations, true, &block_info, Some(&transaction)) + .expect("to apply batch"); + + let mut drive_operations: Vec = vec![]; + + let withdrawals = drive + .dequeue_withdrawal_transactions(16, Some(&transaction), &mut drive_operations) + .expect("to dequeue withdrawals"); + + drive + .apply_drive_operations(drive_operations, true, &block_info, Some(&transaction)) + .expect("to apply batch"); + + assert_eq!(withdrawals.len(), 16); + + let mut 
drive_operations: Vec = vec![]; + + let withdrawals = drive + .dequeue_withdrawal_transactions(16, Some(&transaction), &mut drive_operations) + .expect("to dequeue withdrawals"); + + drive + .apply_drive_operations(drive_operations, true, &block_info, Some(&transaction)) + .expect("to apply batch"); + + assert_eq!(withdrawals.len(), 1); + + let mut drive_operations: Vec = vec![]; + + drive + .dequeue_withdrawal_transactions(16, Some(&transaction), &mut drive_operations) + .expect("to dequeue withdrawals"); + + assert_eq!(drive_operations.len(), 0); + } +} diff --git a/packages/rs-drive/src/drive/identity/withdrawals/transaction_index.rs b/packages/rs-drive/src/drive/identity/withdrawals/transaction_index.rs new file mode 100644 index 00000000000..ef2c38c0662 --- /dev/null +++ b/packages/rs-drive/src/drive/identity/withdrawals/transaction_index.rs @@ -0,0 +1,224 @@ +use grovedb::{ + query_result_type::{QueryResultElement, QueryResultType}, + Element, PathQuery, Query, SizedQuery, TransactionArg, +}; + +use crate::{ + drive::{ + batch::{drive_op_batch::WithdrawalOperationType, DriveOperationType}, + Drive, RootTree, + }, + error::{drive::DriveError, Error}, +}; + +use super::paths::{ + get_withdrawal_transactions_expired_ids_path_vec, WITHDRAWAL_TRANSACTIONS_COUNTER_ID, +}; + +impl Drive { + /// Get and remove latest withdrawal index in a queue + pub fn fetch_and_remove_latest_withdrawal_transaction_index_operations( + &self, + drive_operation_types: &mut Vec, + transaction: TransactionArg, + ) -> Result { + let mut inner_query = Query::new(); + + inner_query.insert_all(); + + let expired_index_query = PathQuery::new( + get_withdrawal_transactions_expired_ids_path_vec(), + SizedQuery::new(inner_query, Some(1), None), + ); + + let (expired_index_elements, _) = self + .grove + .query_raw( + &expired_index_query, + transaction.is_some(), + QueryResultType::QueryKeyElementPairResultType, + transaction, + ) + .unwrap()?; + + if !expired_index_elements.is_empty() { + let 
expired_index_element_pair = expired_index_elements.elements.get(0).unwrap(); + + if let QueryResultElement::KeyElementPairResultItem((key, _)) = + expired_index_element_pair + { + drive_operation_types.push(DriveOperationType::WithdrawalOperation( + WithdrawalOperationType::DeleteExpiredIndex { key: key.clone() }, + )); + + let index = u64::from_be_bytes(key.clone().try_into().map_err(|_| { + Error::Drive(DriveError::CorruptedCodeExecution( + "Transaction index has wrong length", + )) + })?); + + return Ok(index); + } + } + + let result = self + .grove + .get( + [Into::<&[u8; 1]>::into(RootTree::WithdrawalTransactions).as_slice()], + &WITHDRAWAL_TRANSACTIONS_COUNTER_ID, + transaction, + ) + .unwrap() + .map_err(Error::GroveDB); + + if let Err(Error::GroveDB(grovedb::Error::PathKeyNotFound(_))) = &result { + return Ok(0); + } + + let element = result?; + + if let Element::Item(counter_bytes, _) = element { + let counter = u64::from_be_bytes(counter_bytes.try_into().map_err(|_| { + DriveError::CorruptedWithdrawalTransactionsCounterInvalidLength( + "withdrawal transactions counter must be an u64", + ) + })?); + + Ok(counter) + } else { + Err(Error::Drive( + DriveError::CorruptedWithdrawalTransactionsCounterNotItem( + "withdrawal transactions counter must be an item", + ), + )) + } + } + + /// Add counter update operations to the batch + pub fn add_update_withdrawal_index_counter_operation( + &self, + value: u64, + drive_operation_types: &mut Vec, + ) { + drive_operation_types.push(DriveOperationType::WithdrawalOperation( + WithdrawalOperationType::UpdateIndexCounter { index: value }, + )); + } + + /// Add insert expired counter operations + pub fn add_insert_expired_index_operation( + &self, + transaction_index: u64, + drive_operation_types: &mut Vec, + ) { + drive_operation_types.push(DriveOperationType::WithdrawalOperation( + WithdrawalOperationType::InsertExpiredIndex { + index: transaction_index, + }, + )); + } +} + +#[cfg(test)] +mod tests { + use 
grovedb::Element; + + use crate::{ + drive::{ + block_info::BlockInfo, + identity::withdrawals::paths::get_withdrawal_transactions_expired_ids_path, + }, + fee_pools::epochs::Epoch, + tests::helpers::setup::setup_drive_with_initial_state_structure, + }; + + #[test] + fn test_withdrawal_transaction_counter() { + let drive = setup_drive_with_initial_state_structure(); + + let transaction = drive.grove.start_transaction(); + + let block_info = BlockInfo { + time_ms: 1, + height: 1, + epoch: Epoch::new(1), + }; + + let mut batch = vec![]; + + let counter: u64 = 42; + + drive.add_update_withdrawal_index_counter_operation(counter, &mut batch); + + drive + .apply_drive_operations(batch, true, &block_info, Some(&transaction)) + .expect("to apply drive ops"); + + let mut batch = vec![]; + + let stored_counter = drive + .fetch_and_remove_latest_withdrawal_transaction_index_operations( + &mut batch, + Some(&transaction), + ) + .expect("to withdraw counter"); + + drive + .apply_drive_operations(batch, true, &block_info, Some(&transaction)) + .expect("to apply drive ops"); + + assert_eq!(stored_counter, counter); + } + + #[test] + fn test_returns_0_if_empty() { + let drive = setup_drive_with_initial_state_structure(); + + let transaction = drive.grove.start_transaction(); + + let mut batch = vec![]; + + let stored_counter = drive + .fetch_and_remove_latest_withdrawal_transaction_index_operations( + &mut batch, + Some(&transaction), + ) + .expect("to withdraw counter"); + + assert_eq!(stored_counter, 0); + } + + #[test] + fn test_should_return_expired_index_if_any() { + let drive = setup_drive_with_initial_state_structure(); + + let transaction = drive.grove.start_transaction(); + + let bytes = 42u64.to_be_bytes(); + + let path = get_withdrawal_transactions_expired_ids_path(); + + drive + .grove + .insert( + path, + &bytes, + Element::Item(bytes.to_vec(), None), + None, + Some(&transaction), + ) + .unwrap() + .expect("to update index counter"); + + let mut batch = vec![]; + + 
let stored_counter = drive + .fetch_and_remove_latest_withdrawal_transaction_index_operations( + &mut batch, + Some(&transaction), + ) + .expect("to withdraw counter"); + + assert_eq!(stored_counter, 42); + } +} diff --git a/packages/rs-drive/src/drive/object_size_info.rs b/packages/rs-drive/src/drive/object_size_info.rs index 8a440bfa6a5..f8360a3102d 100644 --- a/packages/rs-drive/src/drive/object_size_info.rs +++ b/packages/rs-drive/src/drive/object_size_info.rs @@ -679,6 +679,23 @@ impl<'a> DocumentInfo<'a> { } } + /// Gets the borrowed document + pub fn get_borrowed_document_and_storage_flags( + &self, + ) -> Option<(&DocumentStub, Option<&StorageFlags>)> { + match self { + DocumentInfo::DocumentRefAndSerialization((document, _, storage_flags)) + | DocumentInfo::DocumentRefWithoutSerialization((document, storage_flags)) => { + Some((document, storage_flags.as_ref().map(|flags| flags.as_ref()))) + } + DocumentInfo::DocumentWithoutSerialization((document, storage_flags)) + | DocumentInfo::DocumentAndSerialization((document, _, storage_flags)) => { + Some((document, storage_flags.as_ref().map(|flags| flags.as_ref()))) + } + DocumentInfo::DocumentEstimatedAverageSize(_) => None, + } + } + /// Gets storage flags pub fn get_storage_flags_ref(&self) -> Option<&StorageFlags> { match self { diff --git a/packages/rs-drive/src/drive/protocol_upgrade/mod.rs b/packages/rs-drive/src/drive/protocol_upgrade/mod.rs index d5892a799c7..c33fa857af5 100644 --- a/packages/rs-drive/src/drive/protocol_upgrade/mod.rs +++ b/packages/rs-drive/src/drive/protocol_upgrade/mod.rs @@ -2,7 +2,7 @@ use crate::drive::batch::GroveDbOpBatch; use crate::drive::grove_operations::BatchDeleteApplyType::StatefulBatchDelete; use crate::drive::grove_operations::BatchInsertApplyType; use crate::drive::object_size_info::PathKeyElementInfo; -use crate::drive::system::{misc_path, misc_path_vec}; + use crate::drive::{Drive, RootTree}; use crate::error::drive::DriveError; use crate::error::Error; diff --git 
a/packages/rs-drive/src/drive/query/mod.rs b/packages/rs-drive/src/drive/query/mod.rs index 678cc56a9bb..e9de2a2af30 100644 --- a/packages/rs-drive/src/drive/query/mod.rs +++ b/packages/rs-drive/src/drive/query/mod.rs @@ -158,7 +158,7 @@ impl Drive { } /// Performs and returns the result of the specified query along with skipped items and the cost. - pub fn query_documents_from_contract_cbor( + pub fn query_raw_documents_from_contract_cbor_using_cbor_encoded_query_with_cost( &self, query_cbor: &[u8], contract_cbor: &[u8], @@ -214,7 +214,7 @@ impl Drive { } /// Performs and returns the result of the specified query along with skipped items and the cost. - pub fn query_documentss_from_contract( + pub fn query_documents_from_contract( &self, contract: &Contract, document_type: &DocumentType, @@ -255,7 +255,7 @@ impl Drive { /// Performs and returns the result of the specified query along with the fee. /// Proof is generated. - pub fn query_documents_as_grove_proof( + pub fn query_proof_of_documents_using_contract_id_using_cbor_encoded_query_with_cost( &self, query_cbor: &[u8], contract_id: [u8; 32], @@ -278,7 +278,7 @@ impl Drive { let document_type = contract .contract .document_type_for_name(document_type_name)?; - let items = self.query_documents_from_contract_as_grove_proof_internal( + let items = self.query_proof_of_documents_using_cbor_encoded_query( &contract.contract, document_type, query_cbor, @@ -296,38 +296,7 @@ impl Drive { /// Performs and returns the result of the specified query along with the fee. /// Proof is generated. 
- pub fn query_documents_from_contract_cbor_as_grove_proof( - &self, - contract_cbor: &[u8], - document_type_name: String, - query_cbor: &[u8], - block_info: Option, - transaction: TransactionArg, - ) -> Result<(Vec, u64), Error> { - let mut drive_operations: Vec = vec![]; - let contract = ::from_cbor(contract_cbor, None)?; - - let document_type = contract.document_type_for_name(document_type_name.as_str())?; - - let items = self.query_documents_from_contract_as_grove_proof_internal( - &contract, - document_type, - query_cbor, - transaction, - &mut drive_operations, - )?; - let cost = if let Some(block_info) = block_info { - let fee_result = calculate_fee(None, Some(drive_operations), &block_info.epoch)?; - fee_result.processing_fee - } else { - 0 - }; - Ok((items, cost)) - } - - /// Performs and returns the result of the specified query along with the fee. - /// Proof is generated. - pub fn query_documents_from_contract_as_grove_proof( + pub fn query_proof_of_documents_using_cbor_encoded_query_with_cost( &self, contract: &Contract, document_type: &DocumentType, @@ -337,7 +306,7 @@ impl Drive { ) -> Result<(Vec, u64), Error> { let mut drive_operations: Vec = vec![]; - let items = self.query_documents_from_contract_as_grove_proof_internal( + let items = self.query_proof_of_documents_using_cbor_encoded_query( contract, document_type, query_cbor, @@ -355,7 +324,7 @@ impl Drive { /// Performs and returns the result of the specified internal query. /// Proof is generated. - pub(crate) fn query_documents_from_contract_as_grove_proof_internal( + pub(crate) fn query_proof_of_documents_using_cbor_encoded_query( &self, contract: &Contract, document_type: &DocumentType, @@ -369,7 +338,7 @@ impl Drive { } /// Performs the specified internal query and returns the root hash, values, and fee. 
- pub fn query_documents_from_contract_as_grove_proof_only_get_elements( + pub fn query_proof_of_documents_using_cbor_encoded_query_only_get_elements( &self, contract: &Contract, document_type: &DocumentType, @@ -380,7 +349,7 @@ impl Drive { let mut drive_operations: Vec = vec![]; let (root_hash, items) = self - .query_documents_from_contract_as_grove_proof_only_get_elements_internal( + .query_proof_of_documents_using_cbor_encoded_query_only_get_elements_internal( contract, document_type, query_cbor, @@ -397,7 +366,7 @@ impl Drive { } /// Performs the specified internal query and returns the root hash and values. - pub(crate) fn query_documents_from_contract_as_grove_proof_only_get_elements_internal( + pub(crate) fn query_proof_of_documents_using_cbor_encoded_query_only_get_elements_internal( &self, contract: &Contract, document_type: &DocumentType, diff --git a/packages/rs-drive/src/drive/system/protocol_version.rs b/packages/rs-drive/src/drive/system/protocol_version.rs index 5c1e56e6a03..801d1a43358 100644 --- a/packages/rs-drive/src/drive/system/protocol_version.rs +++ b/packages/rs-drive/src/drive/system/protocol_version.rs @@ -8,8 +8,7 @@ use crate::drive::Drive; use crate::error::drive::DriveError; use crate::error::Error; use crate::fee::op::DriveOperation; -use crate::fee::op::DriveOperation::CalculatedCostOperation; -use costs::CostContext; + use dpp::util::deserializer::ProtocolVersion; use grovedb::{Element, TransactionArg}; use integer_encoding::VarInt; diff --git a/packages/rs-drive/src/fee_pools/epochs/operations_factory.rs b/packages/rs-drive/src/fee_pools/epochs/operations_factory.rs index 055b393b5e7..fb03f4228dc 100644 --- a/packages/rs-drive/src/fee_pools/epochs/operations_factory.rs +++ b/packages/rs-drive/src/fee_pools/epochs/operations_factory.rs @@ -217,7 +217,7 @@ impl Epoch { #[cfg(test)] mod tests { use super::*; - use crate::common::helpers::setup::{setup_drive, setup_drive_with_initial_state_structure}; + use 
crate::tests::helpers::setup::{setup_drive, setup_drive_with_initial_state_structure}; use chrono::Utc; mod increment_proposer_block_count_operation { diff --git a/packages/rs-drive/src/fee_pools/mod.rs b/packages/rs-drive/src/fee_pools/mod.rs index e6ee6539c69..efcaeaba882 100644 --- a/packages/rs-drive/src/fee_pools/mod.rs +++ b/packages/rs-drive/src/fee_pools/mod.rs @@ -94,7 +94,7 @@ pub fn update_unpaid_epoch_index_operation(epoch_index: EpochIndex) -> GroveDbOp #[cfg(test)] mod tests { use super::*; - use crate::common::helpers::setup::setup_drive_with_initial_state_structure; + use crate::tests::helpers::setup::setup_drive_with_initial_state_structure; mod add_create_fee_pool_trees_operations { use super::*; diff --git a/packages/rs-drive/src/lib.rs b/packages/rs-drive/src/lib.rs index c91d5c6cced..0855f8cdd0a 100644 --- a/packages/rs-drive/src/lib.rs +++ b/packages/rs-drive/src/lib.rs @@ -28,3 +28,7 @@ pub mod query; pub use dpp; /// GroveDB module pub use grovedb; + +/// Test helpers +#[cfg(feature = "fixtures-and-mocks")] +pub mod tests; diff --git a/packages/rs-drive/src/tests/helpers/mod.rs b/packages/rs-drive/src/tests/helpers/mod.rs new file mode 100644 index 00000000000..138906d09f7 --- /dev/null +++ b/packages/rs-drive/src/tests/helpers/mod.rs @@ -0,0 +1 @@ +pub mod setup; diff --git a/packages/rs-drive/src/common/helpers/setup.rs b/packages/rs-drive/src/tests/helpers/setup.rs similarity index 55% rename from packages/rs-drive/src/common/helpers/setup.rs rename to packages/rs-drive/src/tests/helpers/setup.rs index ffb7e0e4fc3..8882009563d 100644 --- a/packages/rs-drive/src/common/helpers/setup.rs +++ b/packages/rs-drive/src/tests/helpers/setup.rs @@ -32,8 +32,17 @@ //! Defines helper functions pertinent to setting up Drive. //! 
+use crate::drive::block_info::BlockInfo; use crate::drive::config::DriveConfig; use crate::drive::Drive; +use crate::fee_pools::epochs::Epoch; + +use crate::drive::object_size_info::DocumentInfo::DocumentRefWithoutSerialization; +use crate::drive::object_size_info::{DocumentAndContractInfo, OwnedDocumentInfo}; +use dpp::data_contract::document_type::DocumentType; +use dpp::document::document_stub::DocumentStub; +use dpp::prelude::{DataContract, Document}; +use grovedb::TransactionArg; use tempfile::TempDir; /// Struct with options regarding setting up fee pools. @@ -71,3 +80,61 @@ pub fn setup_drive_with_initial_state_structure() -> Drive { drive } + +/// A function to setup system data contract +pub fn setup_system_data_contract( + drive: &Drive, + data_contract: &DataContract, + transaction: TransactionArg, +) { + drive + .apply_contract_cbor( + data_contract.to_cbor().unwrap(), + Some(data_contract.id.to_buffer()), + BlockInfo { + time_ms: 1, + height: 1, + epoch: Epoch::new(1), + }, + true, + None, + transaction, + ) + .unwrap(); +} + +/// Setup document for a contract +pub fn setup_document( + drive: &Drive, + document: &Document, + data_contract: &DataContract, + document_type: &DocumentType, + transaction: TransactionArg, +) { + //todo: remove this hack + let serialized_document = document + .to_buffer() + .expect("expected to serialize to buffer"); + let document_stub = DocumentStub::from_cbor(&serialized_document, None, None) + .expect("expected to convert to document stub"); + drive + .add_document_for_contract( + DocumentAndContractInfo { + owned_document_info: OwnedDocumentInfo { + document_info: DocumentRefWithoutSerialization((&document_stub, None)), + owner_id: None, + }, + contract: data_contract, + document_type, + }, + false, + BlockInfo { + time_ms: 1, + height: 1, + epoch: Epoch::new(1), + }, + true, + transaction, + ) + .unwrap(); +} diff --git a/packages/rs-drive/src/tests/mod.rs b/packages/rs-drive/src/tests/mod.rs new file mode 100644 index 
00000000000..b87c65442fc --- /dev/null +++ b/packages/rs-drive/src/tests/mod.rs @@ -0,0 +1,2 @@ +/// Test helpers +pub mod helpers; diff --git a/packages/rs-drive/tests/query_tests.rs b/packages/rs-drive/tests/query_tests.rs index 8fcc214bdf8..727fa5dbe07 100644 --- a/packages/rs-drive/tests/query_tests.rs +++ b/packages/rs-drive/tests/query_tests.rs @@ -47,7 +47,6 @@ use serde_json::json; use tempfile::TempDir; use drive::common; -use drive::common::helpers::setup::setup_drive; use drive::common::setup_contract; use drive::drive::batch::GroveDbOpBatch; use drive::drive::config::DriveConfig; @@ -58,6 +57,8 @@ use drive::drive::object_size_info::{DocumentAndContractInfo, OwnedDocumentInfo} use drive::drive::Drive; use drive::error::{query::QueryError, Error}; use drive::query::DriveQuery; +#[cfg(test)] +use drive::tests::helpers::setup::setup_drive; use dpp::data_contract::validation::data_contract_validator::DataContractValidator; use dpp::document::document_stub::DocumentStub; @@ -831,7 +832,7 @@ fn test_family_basic_queries() { assert_eq!(results.len(), 1); let (proof_root_hash, proof_results, _) = drive - .query_documents_from_contract_as_grove_proof_only_get_elements( + .query_proof_of_documents_using_cbor_encoded_query_only_get_elements( &contract, person_document_type, query_cbor.as_slice(), @@ -872,7 +873,7 @@ fn test_family_basic_queries() { assert_eq!(results.len(), 1); let (proof_root_hash, proof_results, _) = drive - .query_documents_from_contract_as_grove_proof_only_get_elements( + .query_proof_of_documents_using_cbor_encoded_query_only_get_elements( &contract, person_document_type, query_cbor.as_slice(), @@ -928,7 +929,7 @@ fn test_family_basic_queries() { assert_eq!(results.len(), 1); let (proof_root_hash, proof_results, _) = drive - .query_documents_from_contract_as_grove_proof_only_get_elements( + .query_proof_of_documents_using_cbor_encoded_query_only_get_elements( &contract, person_document_type, query_cbor.as_slice(), @@ -971,7 +972,7 @@ fn 
test_family_basic_queries() { assert_eq!(results.len(), 1); let (proof_root_hash, proof_results, _) = drive - .query_documents_from_contract_as_grove_proof_only_get_elements( + .query_proof_of_documents_using_cbor_encoded_query_only_get_elements( &contract, person_document_type, query_cbor.as_slice(), @@ -1022,7 +1023,7 @@ fn test_family_basic_queries() { assert_eq!(results.len(), 0); let (proof_root_hash, proof_results, _) = drive - .query_documents_from_contract_as_grove_proof_only_get_elements( + .query_proof_of_documents_using_cbor_encoded_query_only_get_elements( &contract, person_document_type, query_cbor.as_slice(), @@ -1062,7 +1063,7 @@ fn test_family_basic_queries() { assert_eq!(results.len(), 1); let (proof_root_hash, proof_results, _) = drive - .query_documents_from_contract_as_grove_proof_only_get_elements( + .query_proof_of_documents_using_cbor_encoded_query_only_get_elements( &contract, person_document_type, query_cbor.as_slice(), @@ -1890,7 +1891,7 @@ fn test_family_basic_queries() { serializer::value_to_cbor(query_value, None).expect("expected to serialize to cbor"); let (results, _, _) = drive - .query_documents_from_contract_cbor( + .query_raw_documents_from_contract_cbor_using_cbor_encoded_query_with_cost( query_cbor.as_slice(), contract_cbor.as_slice(), String::from("contact"), @@ -3008,7 +3009,7 @@ fn test_dpns_insertion_no_aliases() { assert_eq!(result.0.len(), 15); let (proof_root_hash, proof_results, _) = drive - .query_documents_from_contract_as_grove_proof_only_get_elements( + .query_proof_of_documents_using_cbor_encoded_query_only_get_elements( &contract, domain_document_type, query_cbor.as_slice(), @@ -3060,7 +3061,7 @@ fn test_dpns_insertion_with_aliases() { assert_eq!(result.0.len(), 24); let (proof_root_hash, proof_results, _) = drive - .query_documents_from_contract_as_grove_proof_only_get_elements( + .query_proof_of_documents_using_cbor_encoded_query_only_get_elements( &contract, domain_document_type, query_cbor.as_slice(), diff 
--git a/packages/rs-drive/tests/query_tests_history.rs b/packages/rs-drive/tests/query_tests_history.rs index 95721332fd2..c09247c0894 100644 --- a/packages/rs-drive/tests/query_tests_history.rs +++ b/packages/rs-drive/tests/query_tests_history.rs @@ -44,7 +44,9 @@ use serde::{Deserialize, Serialize}; use serde_json::json; use drive::common; -use drive::common::helpers::setup::setup_drive; + +use drive::tests::helpers::setup::setup_drive; + use drive::contract::Contract; use drive::drive::batch::GroveDbOpBatch; use drive::drive::config::DriveConfig; @@ -1495,7 +1497,7 @@ fn test_query_historical() { serializer::value_to_cbor(query_value, None).expect("expected to serialize to cbor"); let (results, _, _) = drive - .query_documents_from_contract_cbor( + .query_raw_documents_from_contract_cbor_using_cbor_encoded_query_with_cost( query_cbor.as_slice(), contract_cbor.as_slice(), String::from("contact"), diff --git a/packages/wasm-dpp/Cargo.toml b/packages/wasm-dpp/Cargo.toml index 8780683152e..dd675122cc2 100644 --- a/packages/wasm-dpp/Cargo.toml +++ b/packages/wasm-dpp/Cargo.toml @@ -15,7 +15,7 @@ js-sys = "0.3.53" web-sys = { version = "0.3.6", features = ["console"] } thiserror = { version = "1.0" } serde-wasm-bindgen = "0.4.3" -dpp = { path = "../rs-dpp" } +dpp = { path = "../rs-dpp", default-features = false } itertools = { version="0.10.5"} console_error_panic_hook = { version="0.1.7"} diff --git a/packages/wasm-dpp/src/errors/consensus/basic/identity/invalid_identity_credit_withdrawal_transition_core_fee_error.rs b/packages/wasm-dpp/src/errors/consensus/basic/identity/invalid_identity_credit_withdrawal_transition_core_fee_error.rs index edbe4a7d012..bb2cfa55528 100644 --- a/packages/wasm-dpp/src/errors/consensus/basic/identity/invalid_identity_credit_withdrawal_transition_core_fee_error.rs +++ b/packages/wasm-dpp/src/errors/consensus/basic/identity/invalid_identity_credit_withdrawal_transition_core_fee_error.rs @@ -18,8 +18,8 @@ impl 
From<&InvalidIdentityCreditWithdrawalTransitionCoreFeeError> #[wasm_bindgen(js_class=InvalidIdentityCreditWithdrawalTransitionCoreFeeError)] impl InvalidIdentityCreditWithdrawalTransitionCoreFeeErrorWasm { #[wasm_bindgen(js_name=getCoreFee)] - pub fn core_fee(&self) -> u32 { - self.inner.core_fee() + pub fn core_fee_per_byte(&self) -> u32 { + self.inner.core_fee_per_byte() } #[wasm_bindgen(js_name=getCode)] diff --git a/packages/wasm-dpp/src/errors/consensus/basic/identity/invalid_identity_credit_withdrawal_transition_pooling_error.rs b/packages/wasm-dpp/src/errors/consensus/basic/identity/invalid_identity_credit_withdrawal_transition_pooling_error.rs new file mode 100644 index 00000000000..12f3e179f4c --- /dev/null +++ b/packages/wasm-dpp/src/errors/consensus/basic/identity/invalid_identity_credit_withdrawal_transition_pooling_error.rs @@ -0,0 +1,29 @@ +use dpp::consensus::basic::identity::NotImplementedIdentityCreditWithdrawalTransitionPoolingError; +use dpp::consensus::ConsensusError; +use wasm_bindgen::prelude::*; + +#[wasm_bindgen(js_name=NotImplementedIdentityCreditWithdrawalTransitionPoolingError)] +pub struct NotImplementedIdentityCreditWithdrawalTransitionPoolingErrorWasm { + inner: NotImplementedIdentityCreditWithdrawalTransitionPoolingError, +} + +impl From<&NotImplementedIdentityCreditWithdrawalTransitionPoolingError> + for NotImplementedIdentityCreditWithdrawalTransitionPoolingErrorWasm +{ + fn from(e: &NotImplementedIdentityCreditWithdrawalTransitionPoolingError) -> Self { + Self { inner: e.clone() } + } +} + +#[wasm_bindgen(js_class=NotImplementedIdentityCreditWithdrawalTransitionPoolingError)] +impl NotImplementedIdentityCreditWithdrawalTransitionPoolingErrorWasm { + #[wasm_bindgen(js_name=getPooling)] + pub fn pooling(&self) -> u8 { + self.inner.pooling() + } + + #[wasm_bindgen(js_name=getCode)] + pub fn code(&self) -> u32 { + ConsensusError::from(self.inner.clone()).code() + } +} diff --git 
a/packages/wasm-dpp/src/errors/consensus/basic/identity/mod.rs b/packages/wasm-dpp/src/errors/consensus/basic/identity/mod.rs index 2b25c35fcd4..598830189ef 100644 --- a/packages/wasm-dpp/src/errors/consensus/basic/identity/mod.rs +++ b/packages/wasm-dpp/src/errors/consensus/basic/identity/mod.rs @@ -12,6 +12,7 @@ mod invalid_identity_asset_lock_transaction_error; mod invalid_identity_asset_lock_transaction_output_error; mod invalid_identity_credit_withdrawal_transition_core_fee_error; mod invalid_identity_credit_withdrawal_transition_output_script_error; +mod invalid_identity_credit_withdrawal_transition_pooling_error; pub mod invalid_identity_key_signature_error; mod invalid_identity_public_key_data_error; mod invalid_identity_public_key_security_level_error; @@ -35,6 +36,7 @@ pub use invalid_identity_asset_lock_transaction_error::*; pub use invalid_identity_asset_lock_transaction_output_error::*; pub use invalid_identity_credit_withdrawal_transition_core_fee_error::*; pub use invalid_identity_credit_withdrawal_transition_output_script_error::*; +pub use invalid_identity_credit_withdrawal_transition_pooling_error::*; pub use invalid_identity_key_signature_error::*; pub use invalid_identity_public_key_data_error::*; pub use invalid_identity_public_key_security_level_error::*; diff --git a/packages/wasm-dpp/src/errors/consensus/state/document/invalid_document_revision_error.rs b/packages/wasm-dpp/src/errors/consensus/state/document/invalid_document_revision_error.rs index 307171e75ef..43af8cd40de 100644 --- a/packages/wasm-dpp/src/errors/consensus/state/document/invalid_document_revision_error.rs +++ b/packages/wasm-dpp/src/errors/consensus/state/document/invalid_document_revision_error.rs @@ -1,11 +1,11 @@ use crate::buffer::Buffer; -use dpp::identifier::Identifier; +use dpp::{identifier::Identifier, prelude::Revision}; use wasm_bindgen::prelude::*; #[wasm_bindgen(js_name=InvalidDocumentRevisionError)] pub struct InvalidDocumentRevisionErrorWasm { document_id: 
Identifier, - current_revision: u32, + current_revision: Revision, code: u32, } @@ -17,7 +17,7 @@ impl InvalidDocumentRevisionErrorWasm { } #[wasm_bindgen(js_name=getCurrentRevision)] - pub fn current_revision(&self) -> u32 { + pub fn current_revision(&self) -> Revision { self.current_revision } @@ -28,7 +28,7 @@ impl InvalidDocumentRevisionErrorWasm { } impl InvalidDocumentRevisionErrorWasm { - pub fn new(document_id: Identifier, current_revision: u32, code: u32) -> Self { + pub fn new(document_id: Identifier, current_revision: Revision, code: u32) -> Self { Self { document_id, current_revision, diff --git a/packages/wasm-dpp/src/errors/consensus_error.rs b/packages/wasm-dpp/src/errors/consensus_error.rs index b61d74fe605..0365ce06671 100644 --- a/packages/wasm-dpp/src/errors/consensus_error.rs +++ b/packages/wasm-dpp/src/errors/consensus_error.rs @@ -20,6 +20,7 @@ use crate::errors::consensus::basic::identity::{ InvalidIdentityKeySignatureErrorWasm, InvalidIdentityPublicKeyDataErrorWasm, InvalidIdentityPublicKeySecurityLevelErrorWasm, InvalidInstantAssetLockProofErrorWasm, InvalidInstantAssetLockProofSignatureErrorWasm, MissingMasterPublicKeyErrorWasm, + NotImplementedIdentityCreditWithdrawalTransitionPoolingErrorWasm, }; use crate::errors::consensus::state::identity::{ DuplicatedIdentityPublicKeyIdStateErrorWasm, DuplicatedIdentityPublicKeyStateErrorWasm, @@ -154,6 +155,9 @@ pub fn from_consensus_error_ref(e: &DPPConsensusError) -> JsValue { DPPConsensusError::InvalidIdentityCreditWithdrawalTransitionOutputScriptError(e) => { InvalidIdentityCreditWithdrawalTransitionOutputScriptErrorWasm::from(e).into() } + DPPConsensusError::NotImplementedIdentityCreditWithdrawalTransitionPoolingError(e) => { + NotImplementedIdentityCreditWithdrawalTransitionPoolingErrorWasm::from(e).into() + } DPPConsensusError::IdentityInsufficientBalanceError(e) => { IdentityInsufficientBalanceErrorWasm::from(e).into() } diff --git a/packages/wasm-dpp/src/state_repository.rs 
b/packages/wasm-dpp/src/state_repository.rs index 61c47cc3fb9..de70f6ed1e1 100644 --- a/packages/wasm-dpp/src/state_repository.rs +++ b/packages/wasm-dpp/src/state_repository.rs @@ -586,12 +586,4 @@ impl StateRepositoryLike for ExternalStateRepositoryLikeWrapper { async fn fetch_latest_withdrawal_transaction_index(&self) -> Result { todo!() } - - async fn enqueue_withdrawal_transaction( - &self, - _index: u64, - _transaction_bytes: Vec, - ) -> Result<()> { - todo!() - } } diff --git a/packages/withdrawals-contract/schema/withdrawals-documents.json b/packages/withdrawals-contract/schema/withdrawals-documents.json index 9b08af74d5e..3d1f5e06e1d 100644 --- a/packages/withdrawals-contract/schema/withdrawals-documents.json +++ b/packages/withdrawals-contract/schema/withdrawals-documents.json @@ -4,40 +4,61 @@ "type": "object", "indices": [ { - "name": "pooling", + "name": "identityStatus", "properties": [ { - "status": "asc" + "$ownerId": "asc" }, { - "pooling": "asc" + "status": "asc" }, { - "coreFeePerByte": "asc" + "$createdAt": "asc" + } + ], + "unique": false + }, + { + "name": "identityRecent", + "properties": [ + { + "$ownerId": "asc" }, { "$updatedAt": "asc" + }, + { + "status": "asc" } ], "unique": false }, { - "name": "transaction", + "name": "pooling", "properties": [ { "status": "asc" }, { - "transactionId": "asc" + "pooling": "asc" + }, + { + "coreFeePerByte": "asc" + }, + { + "$updatedAt": "asc" } ], "unique": false }, { - "name": "ownerId", + "name": "transaction", "properties": [ { - "$ownerId": "asc" + "status": "asc" + }, + { + "transactionId": "asc" } ], "unique": false @@ -56,6 +77,11 @@ "description": "The Core height on which transaction was signed", "minimum": 1 }, + "transactionIndex": { + "type": "integer", + "description": "Sequential index used to create withdrawal transaction", + "minimum": 1 + }, "amount": { "type": "integer", "description": "The amount to be withdrawn", @@ -64,13 +90,16 @@ "coreFeePerByte": { "type": "integer", 
"description": "This is the fee that you are willing to spend for this transaction in Duffs/Byte", - "minimum": 1 + "minimum": 1, + "maximum": 4294967295 }, "pooling": { "type": "integer", "description": "This indicated the level at which Platform should try to pool this transaction", "enum": [ - 0 + 0, + 1, + 2 ] }, "outputScript": { @@ -98,7 +127,8 @@ "coreFeePerByte", "pooling", "outputScript", + "transactionIndex", "status" ] } -} +} \ No newline at end of file diff --git a/packages/withdrawals-contract/test/unit/withdrawalsContract.spec.js b/packages/withdrawals-contract/test/unit/withdrawalsContract.spec.js index b8809615677..a2a1bbdd7ea 100644 --- a/packages/withdrawals-contract/test/unit/withdrawalsContract.spec.js +++ b/packages/withdrawals-contract/test/unit/withdrawalsContract.spec.js @@ -42,6 +42,7 @@ describe('Withdrawals contract', () => { beforeEach(() => { rawWithdrawalDocument = { transactionId: Buffer.alloc(32, 1), + transactionIndex: 42, amount: 1000, coreFeePerByte: 1, pooling: 0, @@ -249,6 +250,46 @@ describe('Withdrawals contract', () => { }); }); + describe('transactionIndex', () => { + it('should be integer', () => { + rawWithdrawalDocument.transactionIndex = 'string'; + + try { + dpp.document.create(dataContract, identityId, 'withdrawal', rawWithdrawalDocument); + + expect.fail('should throw error'); + } catch (e) { + expect(e.name).to.equal('InvalidDocumentError'); + expect(e.getErrors()).to.have.a.lengthOf(1); + + const [error] = e.getErrors(); + + expect(error.name).to.equal('JsonSchemaError'); + expect(error.keyword).to.equal('type'); + expect(error.params.type).to.equal('integer'); + } + }); + + it('should be at least 1', () => { + rawWithdrawalDocument.transactionIndex = 0; + + try { + dpp.document.create(dataContract, identityId, 'withdrawal', rawWithdrawalDocument); + + expect.fail('should throw error'); + } catch (e) { + expect(e.name).to.equal('InvalidDocumentError'); + expect(e.getErrors()).to.have.a.lengthOf(1); + + const 
[error] = e.getErrors(); + + expect(error.name).to.equal('JsonSchemaError'); + expect(error.keyword).to.equal('minimum'); + expect(error.params.limit).to.equal(1); + } + }); + }); + describe('coreFeePerByte', () => { it('should be present', async () => { delete rawWithdrawalDocument.coreFeePerByte; diff --git a/yarn.lock b/yarn.lock index 4b0a13493cc..12b3953f17a 100644 --- a/yarn.lock +++ b/yarn.lock @@ -1879,6 +1879,7 @@ __metadata: dependencies: "@dashevo/dashcore-lib": "github:dashevo/dashcore-lib#3527419e8739b5e7d4017028d642dba8851c3e25" "@dashevo/dpp": "workspace:*" + "@dashevo/withdrawals-contract": "workspace:*" cargo-cp-artifact: ^0.1.6 cbor: ^8.0.0 chai: ^4.3.4