From 30fe2ae43bcf86f23ab59452043664aa9374b728 Mon Sep 17 00:00:00 2001 From: Filipe Azevedo Date: Mon, 29 Jan 2024 17:44:38 +0000 Subject: [PATCH] Pre-indexing --- .gitignore | 3 +- Cargo.lock | 310 +- Cargo.toml | 1 + chain/arweave/src/chain.rs | 2 + chain/cosmos/src/chain.rs | 2 + chain/dataset/Cargo.toml | 24 + chain/dataset/src/chain.rs | 221 + chain/dataset/src/data_source.rs | 469 ++ chain/dataset/src/lib.rs | 11 + chain/dataset/src/mapper.rs | 80 + chain/dataset/src/runtime.rs | 70 + chain/dataset/src/trigger.rs | 163 + chain/ethereum/src/chain.rs | 20 +- chain/ethereum/src/runtime/runtime_adapter.rs | 4 +- chain/near/src/chain.rs | 2 + chain/starknet/src/chain.rs | 6 +- chain/substreams/src/chain.rs | 2 + core/Cargo.toml | 14 + core/src/subgraph/context/instance/mod.rs | 6 +- core/src/subgraph/context/mod.rs | 4 +- core/src/subgraph/inputs.rs | 9 +- core/src/subgraph/instance_manager.rs | 73 +- core/src/subgraph/registrar.rs | 18 + core/src/subgraph/runner.rs | 56 +- core/src/subgraph/stream.rs | 12 +- graph/Cargo.toml | 3 + graph/src/blockchain/mock.rs | 2 + graph/src/blockchain/mod.rs | 6 + graph/src/components/store/mod.rs | 66 + graph/src/components/store/traits.rs | 27 + graph/src/data/subgraph/mod.rs | 6 +- graph/src/data_source/mod.rs | 6 + graph/src/env/mod.rs | 5 +- graph/src/indexer/README.md | 75 + graph/src/indexer/TODO.md | 45 + graph/src/indexer/block_stream.rs | 399 ++ graph/src/indexer/mod.rs | 499 ++ graph/src/indexer/store/mod.rs | 5 + graph/src/indexer/store/postgres.rs | 425 ++ graph/src/indexer/store/sled.rs | 282 + graph/src/lib.rs | 2 + node/Cargo.toml | 9 + node/src/bin/indexer.rs | 89 + node/src/main.rs | 20 +- runtime/test/src/common.rs | 2 + runtime/test/src/test.rs | 7 +- server/index-node/src/resolver.rs | 21 +- .../down.sql | 3 + .../up.sql | 8 + store/postgres/src/deployment.rs | 202 +- store/postgres/src/deployment_store.rs | 72 +- store/postgres/src/primary.rs | 101 +- store/postgres/src/subgraph_store.rs | 29 +- 
store/postgres/src/writable.rs | 58 +- store/test-store/src/store.rs | 30 +- store/test-store/tests/graph/entity_cache.rs | 33 +- .../test-store/tests/postgres/aggregation.rs | 8 +- store/test-store/tests/postgres/graft.rs | 37 +- store/test-store/tests/postgres/store.rs | 9 +- store/test-store/tests/postgres/subgraph.rs | 52 +- store/test-store/tests/postgres/writable.rs | 11 +- tests/src/fixture/mod.rs | 11 +- tests/tests/runner_tests.rs | 29 +- transforms/uniswap/Cargo.toml | 27 + transforms/uniswap/abis/ERC20.json | 222 + transforms/uniswap/abis/ERC20NameBytes.json | 17 + transforms/uniswap/abis/ERC20SymbolBytes.json | 17 + .../abis/NonfungiblePositionManager.json | 1193 ++++ transforms/uniswap/abis/factory.json | 198 + transforms/uniswap/abis/pool.json | 988 +++ transforms/uniswap/build.rs | 26 + transforms/uniswap/proto/uniswap.proto | 140 + transforms/uniswap/src/abi/erc20.rs | 1249 ++++ transforms/uniswap/src/abi/factory.rs | 1028 +++ transforms/uniswap/src/abi/mod.rs | 17 + transforms/uniswap/src/abi/pool.rs | 5135 +++++++++++++++ transforms/uniswap/src/abi/positionmanager.rs | 5863 +++++++++++++++++ transforms/uniswap/src/lib.rs | 157 + .../src/proto/edgeandnode.uniswap.v1.rs | 268 + transforms/uniswap/src/proto/mod.rs | 7 + transforms/uniswap/src/types.rs | 201 + 81 files changed, 20927 insertions(+), 102 deletions(-) create mode 100644 chain/dataset/Cargo.toml create mode 100644 chain/dataset/src/chain.rs create mode 100644 chain/dataset/src/data_source.rs create mode 100644 chain/dataset/src/lib.rs create mode 100644 chain/dataset/src/mapper.rs create mode 100644 chain/dataset/src/runtime.rs create mode 100644 chain/dataset/src/trigger.rs create mode 100644 graph/src/indexer/README.md create mode 100644 graph/src/indexer/TODO.md create mode 100644 graph/src/indexer/block_stream.rs create mode 100644 graph/src/indexer/mod.rs create mode 100644 graph/src/indexer/store/mod.rs create mode 100644 graph/src/indexer/store/postgres.rs create mode 100644 
graph/src/indexer/store/sled.rs create mode 100644 node/src/bin/indexer.rs create mode 100644 store/postgres/migrations/2024-03-06-134207_subgraph-segments/down.sql create mode 100644 store/postgres/migrations/2024-03-06-134207_subgraph-segments/up.sql create mode 100644 transforms/uniswap/Cargo.toml create mode 100644 transforms/uniswap/abis/ERC20.json create mode 100644 transforms/uniswap/abis/ERC20NameBytes.json create mode 100644 transforms/uniswap/abis/ERC20SymbolBytes.json create mode 100644 transforms/uniswap/abis/NonfungiblePositionManager.json create mode 100644 transforms/uniswap/abis/factory.json create mode 100644 transforms/uniswap/abis/pool.json create mode 100644 transforms/uniswap/build.rs create mode 100644 transforms/uniswap/proto/uniswap.proto create mode 100644 transforms/uniswap/src/abi/erc20.rs create mode 100644 transforms/uniswap/src/abi/factory.rs create mode 100644 transforms/uniswap/src/abi/mod.rs create mode 100644 transforms/uniswap/src/abi/pool.rs create mode 100644 transforms/uniswap/src/abi/positionmanager.rs create mode 100644 transforms/uniswap/src/lib.rs create mode 100644 transforms/uniswap/src/proto/edgeandnode.uniswap.v1.rs create mode 100644 transforms/uniswap/src/proto/mod.rs create mode 100644 transforms/uniswap/src/types.rs diff --git a/.gitignore b/.gitignore index 15ad2465251..507acf77dff 100644 --- a/.gitignore +++ b/.gitignore @@ -32,4 +32,5 @@ lcov.info # Docker volumes and debug logs .postgres -logfile \ No newline at end of file +logfile +sled_indexer_db diff --git a/Cargo.lock b/Cargo.lock index a49f27bf7b9..8bcc141f0bd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -396,6 +396,30 @@ dependencies = [ "generic-array", ] +[[package]] +name = "borsh" +version = "1.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f58b559fd6448c6e2fd0adb5720cd98a2506594cafa4737ff98c396f3e82f667" +dependencies = [ + "borsh-derive", + "cfg_aliases", +] + +[[package]] +name = "borsh-derive" +version = "1.3.1" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7aadb5b6ccbd078890f6d7003694e33816e6b784358f18e15e7e6d9f065a57cd" +dependencies = [ + "once_cell", + "proc-macro-crate 3.1.0", + "proc-macro2", + "quote", + "syn 2.0.48", + "syn_derive", +] + [[package]] name = "bs58" version = "0.4.0" @@ -457,6 +481,12 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +[[package]] +name = "cfg_aliases" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e" + [[package]] name = "chrono" version = "0.4.31" @@ -1280,6 +1310,16 @@ dependencies = [ "percent-encoding", ] +[[package]] +name = "fs2" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9564fc758e15025b46aa6643b1b77d047d1a56a1aea6e01002ac0c7026876213" +dependencies = [ + "libc", + "winapi", +] + [[package]] name = "funty" version = "2.0.0" @@ -1493,6 +1533,7 @@ dependencies = [ "async-trait", "atomic_refcell", "bigdecimal 0.1.2", + "borsh", "bytes", "chrono", "cid", @@ -1534,6 +1575,7 @@ dependencies = [ "serde_plain", "serde_regex", "serde_yaml", + "sled", "slog", "slog-async", "slog-envlogger", @@ -1597,6 +1639,26 @@ dependencies = [ "tonic-build", ] +[[package]] +name = "graph-chain-dataset" +version = "0.34.0" +dependencies = [ + "anyhow", + "base64 0.20.0", + "graph", + "graph-chain-ethereum", + "graph-runtime-wasm", + "hex", + "lazy_static", + "prost", + "prost-types", + "semver", + "serde", + "sled", + "tokio", + "tonic-build", +] + [[package]] name = "graph-chain-ethereum" version = "0.34.0" @@ -1682,11 +1744,13 @@ dependencies = [ "atomic_refcell", "bytes", "cid", + "ethabi", "futures 0.1.31", "futures 0.3.16", "graph", "graph-chain-arweave", "graph-chain-cosmos", + "graph-chain-dataset", "graph-chain-ethereum", 
"graph-chain-near", "graph-chain-starknet", @@ -1694,9 +1758,18 @@ dependencies = [ "graph-runtime-wasm", "ipfs-api", "ipfs-api-backend-hyper", + "lru_time_cache", + "once_cell", + "semver", + "serde", + "serde_json", "serde_yaml", + "sled", + "substreams", + "substreams-ethereum 0.8.0", "tower 0.4.13 (git+https://github.com/tower-rs/tower.git)", "tower-test", + "uniswap", "uuid", ] @@ -1720,6 +1793,7 @@ dependencies = [ name = "graph-node" version = "0.34.0" dependencies = [ + "borsh", "clap", "diesel", "env_logger", @@ -1728,6 +1802,7 @@ dependencies = [ "graph", "graph-chain-arweave", "graph-chain-cosmos", + "graph-chain-dataset", "graph-chain-ethereum", "graph-chain-near", "graph-chain-starknet", @@ -1747,7 +1822,9 @@ dependencies = [ "prometheus", "serde", "shellexpand", + "sled", "termcolor", + "uniswap", "url", ] @@ -2843,7 +2920,7 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc076939022111618a5026d3be019fd8b366e76314538ff9a1b59ffbcbf98bcd" dependencies = [ - "proc-macro-crate", + "proc-macro-crate 1.1.0", "proc-macro-error", "proc-macro2", "quote", @@ -3075,7 +3152,7 @@ version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2c6e626dc84025ff56bf1476ed0e30d10c84d7f89a475ef46ebabee1095a8fba" dependencies = [ - "proc-macro-crate", + "proc-macro-crate 1.1.0", "proc-macro2", "quote", "syn 1.0.107", @@ -3163,7 +3240,40 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3dfb61232e34fcb633f43d12c58f83c1df82962dcdfa565a4e866ffc17dafe12" dependencies = [ - "phf_shared", + "phf_shared 0.8.0", +] + +[[package]] +name = "phf" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ade2d8b8f33c7333b51bcf0428d37e217e9f32192ae4772156f65063b8ce03dc" +dependencies = [ + "phf_macros", + "phf_shared 0.11.2", +] + +[[package]] +name = "phf_generator" +version = "0.11.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "48e4cc64c2ad9ebe670cb8fd69dd50ae301650392e81c05f9bfcb2d5bdbc24b0" +dependencies = [ + "phf_shared 0.11.2", + "rand", +] + +[[package]] +name = "phf_macros" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3444646e286606587e49f3bcf1679b8cef1dc2c5ecc29ddacaffc305180d464b" +dependencies = [ + "phf_generator", + "phf_shared 0.11.2", + "proc-macro2", + "quote", + "syn 2.0.48", ] [[package]] @@ -3175,6 +3285,15 @@ dependencies = [ "siphasher", ] +[[package]] +name = "phf_shared" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90fcb95eef784c2ac79119d1dd819e162b5da872ce6f3c3abe1e8ca1c082f72b" +dependencies = [ + "siphasher", +] + [[package]] name = "pin-project" version = "1.0.12" @@ -3337,6 +3456,15 @@ dependencies = [ "toml 0.5.11", ] +[[package]] +name = "proc-macro-crate" +version = "3.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d37c51ca738a55da99dc0c4a34860fd675453b8b36209178c2249bb13651284" +dependencies = [ + "toml_edit 0.21.0", +] + [[package]] name = "proc-macro-error" version = "1.0.4" @@ -4134,6 +4262,22 @@ version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c307a32c1c5c437f38c7fd45d753050587732ba8628319fbdf12a7e289ccc590" +[[package]] +name = "sled" +version = "0.34.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f96b4737c2ce5987354855aed3797279def4ebf734436c6aa4552cf8e169935" +dependencies = [ + "crc32fast", + "crossbeam-epoch", + "crossbeam-utils", + "fs2", + "fxhash", + "libc", + "log", + "parking_lot 0.11.2", +] + [[package]] name = "slice-group-by" version = "0.3.1" @@ -4400,6 +4544,130 @@ dependencies = [ "substreams", ] +[[package]] +name = "substreams-ethereum" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"03a426822f6032fa28145cb2cb7661e70554da839a40ee08b6348f1479a44e1b" +dependencies = [ + "getrandom", + "substreams", + "substreams-ethereum-abigen 0.8.0", + "substreams-ethereum-core 0.8.0", + "substreams-ethereum-derive 0.8.0", +] + +[[package]] +name = "substreams-ethereum" +version = "0.9.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48f45dc04be50b7ca08d6d5c4560ee3eeba16ccaa1c124d0361bb30b5b84e28b" +dependencies = [ + "getrandom", + "num-bigint 0.4.4", + "substreams", + "substreams-ethereum-abigen 0.9.9", + "substreams-ethereum-core 0.9.9", + "substreams-ethereum-derive 0.9.9", +] + +[[package]] +name = "substreams-ethereum-abigen" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd2ba34bc4d19288e7487aa2b13ee75955222809d2dab48c9dad33230a6f4732" +dependencies = [ + "anyhow", + "ethabi", + "heck", + "hex", + "prettyplease", + "proc-macro2", + "quote", + "substreams-ethereum-core 0.8.0", + "syn 1.0.107", +] + +[[package]] +name = "substreams-ethereum-abigen" +version = "0.9.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c04307913a355aaf2a1bb7186d4bc7e36875f3d4aff77b47e83f1b63b24da55" +dependencies = [ + "anyhow", + "ethabi", + "heck", + "hex", + "prettyplease", + "proc-macro2", + "quote", + "substreams-ethereum-core 0.9.9", + "syn 1.0.107", +] + +[[package]] +name = "substreams-ethereum-core" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f680a56fc8df55dfd0b3465af2ed6f0eb471b4a0b5a9e0e56c13377911104b01" +dependencies = [ + "bigdecimal 0.3.1", + "ethabi", + "getrandom", + "num-bigint 0.4.4", + "prost", + "prost-build", + "prost-types", + "substreams", +] + +[[package]] +name = "substreams-ethereum-core" +version = "0.9.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db9048cc9a66873ab7069ef958c2684994e6ee323da49c186b19156fdb4ca131" +dependencies = [ + "bigdecimal 
0.3.1", + "ethabi", + "getrandom", + "num-bigint 0.4.4", + "prost", + "prost-build", + "prost-types", + "substreams", +] + +[[package]] +name = "substreams-ethereum-derive" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b40d3bcb83759b62c2ed372b582be8ba46fc844362a7f7a35fa0269590066118" +dependencies = [ + "ethabi", + "heck", + "hex", + "proc-macro2", + "quote", + "substreams-ethereum-abigen 0.8.0", + "syn 1.0.107", +] + +[[package]] +name = "substreams-ethereum-derive" +version = "0.9.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e862928bee8653f5c9291ac619c8dc0da14ca61d8cd8d89b3acdbbde4d0bf304" +dependencies = [ + "ethabi", + "heck", + "hex", + "num-bigint 0.4.4", + "proc-macro2", + "quote", + "substreams-ethereum-abigen 0.9.9", + "syn 1.0.107", +] + [[package]] name = "substreams-head-tracker" version = "0.34.0" @@ -4469,6 +4737,18 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "syn_derive" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1329189c02ff984e9736652b1631330da25eaa6bc639089ed4915d25446cbe7b" +dependencies = [ + "proc-macro-error", + "proc-macro2", + "quote", + "syn 2.0.48", +] + [[package]] name = "sync_wrapper" version = "0.1.1" @@ -4738,7 +5018,7 @@ dependencies = [ "log", "parking_lot 0.11.2", "percent-encoding", - "phf", + "phf 0.8.0", "pin-project-lite", "postgres-protocol", "postgres-types", @@ -5213,6 +5493,28 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3" +[[package]] +name = "uniswap" +version = "0.1.0" +dependencies = [ + "anyhow", + "borsh", + "ethabi", + "graph", + "hex", + "num-bigint 0.4.4", + "num-traits", + "phf 0.11.2", + "prost", + "prost-build", + "prost-types", + "substreams", + "substreams-entity-change", + "substreams-ethereum 0.9.9", + "tiny-keccak 2.0.2", + 
"tonic-build", +] + [[package]] name = "unreachable" version = "1.0.0" diff --git a/Cargo.toml b/Cargo.toml index 9c25ea1ac29..15befd7d230 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -9,6 +9,7 @@ members = [ "server/*", "store/*", "substreams/*", + "transforms/*", "graph", "tests", ] diff --git a/chain/arweave/src/chain.rs b/chain/arweave/src/chain.rs index 65019d0724b..ec85526182b 100644 --- a/chain/arweave/src/chain.rs +++ b/chain/arweave/src/chain.rs @@ -6,6 +6,7 @@ use graph::blockchain::{ EmptyNodeCapabilities, NoopRuntimeAdapter, }; use graph::cheap_clone::CheapClone; +use graph::components::metrics::stopwatch::StopwatchMetrics; use graph::components::store::DeploymentCursorTracker; use graph::data::subgraph::UnifiedMappingApiVersion; use graph::env::EnvVars; @@ -117,6 +118,7 @@ impl Blockchain for Chain { start_blocks: Vec, filter: Arc, unified_api_version: UnifiedMappingApiVersion, + _stopwatch: StopwatchMetrics, ) -> Result>, Error> { let adapter = self .triggers_adapter( diff --git a/chain/cosmos/src/chain.rs b/chain/cosmos/src/chain.rs index 6a493a144d4..69088f5d7c5 100644 --- a/chain/cosmos/src/chain.rs +++ b/chain/cosmos/src/chain.rs @@ -1,5 +1,6 @@ use graph::blockchain::firehose_block_ingestor::FirehoseBlockIngestor; use graph::blockchain::BlockIngestor; +use graph::components::metrics::stopwatch::StopwatchMetrics; use graph::env::EnvVars; use graph::prelude::MetricsRegistry; use graph::substreams::Clock; @@ -110,6 +111,7 @@ impl Blockchain for Chain { start_blocks: Vec, filter: Arc, unified_api_version: UnifiedMappingApiVersion, + _stopwatch: StopwatchMetrics, ) -> Result>, Error> { let adapter = self .triggers_adapter( diff --git a/chain/dataset/Cargo.toml b/chain/dataset/Cargo.toml new file mode 100644 index 00000000000..e60fb626171 --- /dev/null +++ b/chain/dataset/Cargo.toml @@ -0,0 +1,24 @@ +[package] +name = "graph-chain-dataset" +version.workspace = true +edition.workspace = true + +[build-dependencies] +tonic-build = { workspace = true } + 
+[dependencies] +graph = { path = "../../graph" } +graph-chain-ethereum = { path = "../ethereum" } +graph-runtime-wasm = { path = "../../runtime/wasm" } +lazy_static = "1.2.0" +serde = "1.0" +prost = { workspace = true } +prost-types = { workspace = true } +anyhow = "1.0" +hex = "0.4.3" +semver = "1.0.21" +base64 = "0.20.0" +sled = "0.34.7" + +[dev-dependencies] +tokio = { version = "1", features = ["full"] } diff --git a/chain/dataset/src/chain.rs b/chain/dataset/src/chain.rs new file mode 100644 index 00000000000..c28d6517815 --- /dev/null +++ b/chain/dataset/src/chain.rs @@ -0,0 +1,221 @@ +use crate::runtime::RuntimeAdapter; +use crate::{data_source::*, TriggerData, TriggerFilter, TriggersAdapter}; +use anyhow::{anyhow, Error}; +use graph::blockchain::client::ChainClient; +use graph::blockchain::{BlockIngestor, BlockTime, EmptyNodeCapabilities, NoopRuntimeAdapter}; +use graph::components::metrics::stopwatch::StopwatchMetrics; +use graph::components::store::{DeploymentCursorTracker, SubgraphStore}; +use graph::indexer::block_stream::IndexerBlockStream; +use graph::indexer::store::{PostgresIndexerDB, SledIndexerStore, DB_NAME}; +use graph::prelude::{ + BlockHash, CheapClone, DeploymentHash, EthereumCallCache, LoggerFactory, MetricsRegistry, +}; +use graph::{ + blockchain::{ + self, + block_stream::{BlockStream, FirehoseCursor}, + BlockPtr, Blockchain, BlockchainKind, IngestorError, RuntimeAdapter as RuntimeAdapterTrait, + }, + components::store::DeploymentLocator, + data::subgraph::UnifiedMappingApiVersion, + prelude::{async_trait, BlockNumber, ChainStore}, + slog::Logger, +}; +use graph_chain_ethereum::network::EthereumNetworkAdapters; + +use std::hash::Hash; +use std::sync::Arc; + +#[derive(Default, Debug, Clone)] +pub struct Block { + pub hash: BlockHash, + pub number: BlockNumber, + pub data: Box<[u8]>, +} + +impl blockchain::Block for Block { + fn ptr(&self) -> BlockPtr { + BlockPtr { + hash: self.hash.clone(), + number: self.number, + } + } + + fn 
parent_ptr(&self) -> Option { + None + } + + fn timestamp(&self) -> BlockTime { + BlockTime::NONE + } +} + +pub struct Chain { + pub(crate) eth_adapters: Option>, + pub(crate) call_cache: Arc, + pub(crate) logger_factory: LoggerFactory, + pub(crate) _metrics_registry: Arc, + pub(crate) subgraph_store: Arc, + chain_store: Arc, +} + +impl Chain { + pub fn new( + eth_adapters: Option>, + eth_call_cache: Arc, + logger_factory: LoggerFactory, + metrics_registry: Arc, + subgraph_store: Arc, + chain_store: Arc, + ) -> Self { + Self { + logger_factory, + _metrics_registry: metrics_registry, + eth_adapters, + call_cache: eth_call_cache, + subgraph_store, + chain_store, + } + } +} + +impl std::fmt::Debug for Chain { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "chain: substreams") + } +} + +#[async_trait] +impl Blockchain for Chain { + const KIND: BlockchainKind = BlockchainKind::Dataset; + + type Client = (); + type Block = Block; + type DataSource = DataSource; + type UnresolvedDataSource = UnresolvedDataSource; + + type DataSourceTemplate = NoopDataSourceTemplate; + type UnresolvedDataSourceTemplate = NoopDataSourceTemplate; + + /// Trigger data as parsed from the triggers adapter. + type TriggerData = TriggerData; + + /// Decoded trigger ready to be processed by the mapping. + /// New implementations should have this be the same as `TriggerData`. + type MappingTrigger = TriggerData; + + /// Trigger filter used as input to the triggers adapter. 
+ type TriggerFilter = TriggerFilter; + + type NodeCapabilities = EmptyNodeCapabilities; + + fn triggers_adapter( + &self, + _log: &DeploymentLocator, + _capabilities: &Self::NodeCapabilities, + _unified_api_version: UnifiedMappingApiVersion, + ) -> Result>, Error> { + Ok(Arc::new(TriggersAdapter {})) + } + + async fn new_block_stream( + &self, + from: DeploymentLocator, + _store: impl DeploymentCursorTracker, + _start_blocks: Vec, + filter: Arc, + _unified_api_version: UnifiedMappingApiVersion, + stopwatch: StopwatchMetrics, + ) -> Result>, Error> { + // let deployment: &str = "QmagGaBm7FL9uQWg1bk52Eb3LTN4owkvxEKkirtyXNLQc9"; + // let hash = DeploymentHash::new(deployment).unwrap(); + + // let db = Arc::new(sled::open(DB_NAME).unwrap()); + // let store = Arc::new( + // SledIndexerStore::new( + // db, + // &hash, + // graph::indexer::store::StateSnapshotFrequency::Never, + // ) + // .unwrap(), + // ); + + let store = Arc::new( + PostgresIndexerDB::new( + self.subgraph_store.cheap_clone(), + from.clone(), + self.logger_factory + .component_logger(&format!("BlockStream from {}", from.hash), None), + stopwatch, + ) + .await, + ); + + let logger = graph::log::logger(true); + + let metrics = Arc::new(MetricsRegistry::mock()); + let handler = filter + .handler + .as_ref() + .ok_or(anyhow!("Expected dataset block stream to have a handler"))? + .clone(); + + Ok(Box::new(IndexerBlockStream::::new( + from.hash.clone(), + store, + None, + vec![], + vec![], + logger.clone(), + handler, + metrics, + ))) + } + + fn is_refetch_block_required(&self) -> bool { + false + } + async fn refetch_firehose_block( + &self, + _logger: &Logger, + _cursor: FirehoseCursor, + ) -> Result { + unimplemented!("This chain does not support Dynamic Data Sources. 
is_refetch_block_required always returns false, this shouldn't be called.") + } + + fn chain_store(&self) -> Arc { + self.chain_store.cheap_clone() + } + + async fn block_pointer_from_number( + &self, + _logger: &Logger, + number: BlockNumber, + ) -> Result { + // This is the same thing TriggersAdapter does, not sure if it's going to work but + // we also don't yet have a good way of getting this value until we sort out the + // chain store. + // TODO(filipe): Fix this once the chain_store is correctly setup for substreams. + Ok(BlockPtr { + hash: BlockHash::from(vec![0xff; 32]), + number, + }) + } + fn runtime_adapter(&self) -> Arc> { + match self.eth_adapters { + None => Arc::new(NoopRuntimeAdapter::default()), + Some(ref adapters) => Arc::new(RuntimeAdapter { + eth_adapters: adapters.cheap_clone(), + call_cache: self.call_cache.cheap_clone(), + }), + } + } + + fn chain_client(&self) -> Arc> { + Arc::new(ChainClient::Rpc(())) + } + + fn block_ingestor(&self) -> anyhow::Result> { + anyhow::bail!("Datasets don't use block ingestors") + } +} diff --git a/chain/dataset/src/data_source.rs b/chain/dataset/src/data_source.rs new file mode 100644 index 00000000000..dd7cb3c0749 --- /dev/null +++ b/chain/dataset/src/data_source.rs @@ -0,0 +1,469 @@ +use std::{collections::HashSet, sync::Arc}; + +use anyhow::{anyhow, bail, Context, Error}; +use graph::{ + blockchain, + components::{ + link_resolver::LinkResolver, store::DeploymentLocator, subgraph::InstanceDSTemplateInfo, + }, + data::subgraph::DeploymentHash, + prelude::{async_trait, ethabi::Contract, BlockNumber, CheapClone, Link}, + slog::Logger, +}; + +use serde::Deserialize; + +use crate::{chain::Chain, Block, TriggerData}; + +pub const DATASET_KIND: &str = "dataset"; + +const DYNAMIC_DATA_SOURCE_ERROR: &str = "Datasets do not support dynamic data sources"; +const TEMPLATE_ERROR: &str = "Datasets do not support templates"; + +const ALLOWED_MAPPING_KIND: [&str; 1] = ["dataset"]; +const DATASET_HANDLER_KIND: &str = 
"dataset"; +#[derive(Clone, Debug, PartialEq)] +/// Represents the DataSource portion of the manifest once it has been parsed +/// and the substream spkg has been downloaded + parsed. +pub struct DataSource { + pub kind: String, + pub network: Option, + pub name: String, + pub source: Source, + pub mapping: Mapping, + pub initial_block: Option, + pub context: Arc>, +} + +impl blockchain::DataSource for DataSource { + fn from_template_info( + _info: InstanceDSTemplateInfo, + _template: &graph::data_source::DataSourceTemplate, + ) -> Result { + Err(anyhow!("Substreams does not support templates")) + } + + fn address(&self) -> Option<&[u8]> { + None + } + + fn start_block(&self) -> BlockNumber { + self.initial_block.unwrap_or(0) + } + + fn end_block(&self) -> Option { + None + } + + fn name(&self) -> &str { + &self.name + } + + fn kind(&self) -> &str { + &self.kind + } + + fn network(&self) -> Option<&str> { + self.network.as_deref() + } + + fn context(&self) -> Arc> { + self.context.cheap_clone() + } + + fn creation_block(&self) -> Option { + None + } + + fn api_version(&self) -> semver::Version { + self.mapping.api_version.clone() + } + + fn runtime(&self) -> Option>> { + Some(self.mapping.handler.runtime.clone()) + } + + fn handler_kinds(&self) -> HashSet<&str> { + // This is placeholder, substreams do not have a handler kind. + vec![DATASET_HANDLER_KIND].into_iter().collect() + } + + // match_and_decode only seems to be used on the default trigger processor which substreams + // bypasses so it should be fine to leave it unimplemented. 
+ fn match_and_decode( + &self, + _trigger: &TriggerData, + _block: &Arc, + _logger: &Logger, + ) -> Result>, Error> { + unimplemented!() + } + + fn is_duplicate_of(&self, _other: &Self) -> bool { + todo!() + } + + fn as_stored_dynamic_data_source(&self) -> graph::components::store::StoredDynamicDataSource { + unimplemented!("{}", DYNAMIC_DATA_SOURCE_ERROR) + } + + fn validate(&self) -> Vec { + let mut errs = vec![]; + + if &self.kind != DATASET_KIND { + errs.push(anyhow!( + "data source has invalid `kind`, expected {} but found {}", + DATASET_KIND, + self.kind + )) + } + + if self.name.is_empty() { + errs.push(anyhow!("name cannot be empty")); + } + + if !ALLOWED_MAPPING_KIND.contains(&self.mapping.kind.as_str()) { + errs.push(anyhow!( + "mapping kind has to be one of {:?}, found {}", + ALLOWED_MAPPING_KIND, + self.mapping.kind + )) + } + + errs + } + + fn from_stored_dynamic_data_source( + _template: &::DataSourceTemplate, + _stored: graph::components::store::StoredDynamicDataSource, + ) -> Result { + Err(anyhow!(DYNAMIC_DATA_SOURCE_ERROR)) + } +} + +#[derive(Clone, Debug, PartialEq)] +pub struct Source { + pub dataset: DeploymentHash, +} + +#[derive(Clone, Debug, PartialEq)] +pub struct Mapping { + pub api_version: semver::Version, + pub kind: String, + pub handler: MappingHandler, + pub abis: Vec>, +} + +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct MappingHandler { + pub handler: String, + pub runtime: Arc>, +} + +#[derive(Clone, Debug, Deserialize, PartialEq, Eq)] +/// Raw representation of the data source for deserialization purposes. 
+pub struct UnresolvedDataSource { + pub kind: String, + pub network: Option, + pub name: String, + pub source: UnresolvedSource, + pub mapping: UnresolvedMapping, +} + +#[derive(Clone, Debug, Hash, Eq, PartialEq, Deserialize)] +pub struct UnresolvedMappingABI { + pub name: String, + pub file: Link, +} + +impl UnresolvedMappingABI { + pub async fn resolve( + self, + resolver: &Arc, + logger: &Logger, + ) -> Result { + let contract_bytes = resolver.cat(logger, &self.file).await.with_context(|| { + format!( + "failed to resolve ABI {} from {}", + self.name, self.file.link + ) + })?; + let contract = Contract::load(&*contract_bytes)?; + Ok(MappingABI { + name: self.name, + contract, + }) + } +} + +#[derive(Clone, Debug, Default, Hash, Eq, PartialEq, Deserialize)] +#[serde(rename_all = "camelCase")] +/// Text api_version, before parsing and validation. +pub struct UnresolvedMapping { + pub api_version: String, + // pub abis: Vec, + pub kind: String, + pub handler: String, + pub file: Link, +} + +#[derive(Clone, Debug, PartialEq)] +pub struct MappingABI { + pub name: String, + pub contract: Contract, +} + +impl From for graph_chain_ethereum::MappingABI { + fn from(val: MappingABI) -> Self { + let MappingABI { name, contract } = val; + + graph_chain_ethereum::MappingABI { name, contract } + } +} + +#[async_trait] +impl blockchain::UnresolvedDataSource for UnresolvedDataSource { + async fn resolve( + self, + resolver: &Arc, + logger: &Logger, + _manifest_idx: u32, + ) -> Result { + let runtime = resolver.cat(&logger, &self.mapping.file).await?; + + // let abis = self + // .mapping + // .abis + // .into_iter() + // .map(|unresolved_abi| async { + // Result::<_, Error>::Ok(Arc::new(unresolved_abi.resolve(resolver, logger).await?)) + // }) + // .collect::>() + // .try_collect::>() + // .await?; + struct Hack<'a> { + name: &'a str, + contract: &'a str, + } + + let abis = vec![ + Hack { + name: "NonfungiblePositionManager", + contract: include_str!( + 
"../../../transforms/uniswap/abis/NonfungiblePositionManager.json" + ), + }, + Hack { + name: "Pool", + contract: include_str!("../../../transforms/uniswap/abis/pool.json"), + }, + Hack { + name: "Factory", + contract: include_str!("../../../transforms/uniswap/abis/factory.json"), + }, + Hack { + name: "ERC20", + contract: include_str!("../../../transforms/uniswap/abis/ERC20.json"), + }, + Hack { + name: "ERC20NameBytes", + contract: include_str!("../../../transforms/uniswap/abis/ERC20NameBytes.json"), + }, + Hack { + name: "ERC20SymbolBytes", + contract: include_str!("../../../transforms/uniswap/abis/ERC20SymbolBytes.json"), + }, + ] + .into_iter() + .flat_map(|h| { + Contract::load(h.contract.as_bytes()).map(|c| MappingABI { + name: h.name.into(), + contract: c, + }) + }) + .map(Arc::new) + .collect(); + + let dataset = match DeploymentHash::new(self.source.dataset) { + Ok(hash) => hash, + Err(s) => bail!("not a valid deployment hash {}", s), + }; + + Ok(DataSource { + kind: DATASET_KIND.into(), + network: self.network, + name: self.name, + source: Source { dataset }, + mapping: Mapping { + api_version: semver::Version::parse(&__self.mapping.api_version)?, + kind: self.mapping.kind, + handler: MappingHandler { + handler: self.mapping.handler, + runtime: Arc::new(runtime), + }, + abis, + }, + initial_block: self.source.start_block, + context: Arc::new(None), + }) + } +} + +#[derive(Clone, Debug, Default, Hash, Eq, PartialEq, Deserialize)] +#[serde(rename_all = "camelCase")] +/// Source is a part of the manifest and this is needed for parsing. +pub struct UnresolvedSource { + #[serde(rename = "startBlock", default)] + start_block: Option, + pub dataset: String, +} + +#[derive(Debug, Clone, Default, Deserialize)] +/// This is necessary for the Blockchain trait associated types, substreams do not support +/// data source templates so this is a noop and is not expected to be called. 
+pub struct NoopDataSourceTemplate {} + +impl blockchain::DataSourceTemplate for NoopDataSourceTemplate { + fn name(&self) -> &str { + unimplemented!("{}", TEMPLATE_ERROR); + } + + fn api_version(&self) -> semver::Version { + unimplemented!("{}", TEMPLATE_ERROR); + } + + fn runtime(&self) -> Option>> { + unimplemented!("{}", TEMPLATE_ERROR); + } + + fn manifest_idx(&self) -> u32 { + todo!() + } + + fn kind(&self) -> &str { + unimplemented!("{}", TEMPLATE_ERROR); + } +} + +#[async_trait] +impl blockchain::UnresolvedDataSourceTemplate for NoopDataSourceTemplate { + async fn resolve( + self, + _resolver: &Arc, + _logger: &Logger, + _manifest_idx: u32, + ) -> Result { + unimplemented!("{}", TEMPLATE_ERROR) + } +} + +#[cfg(test)] +mod test { + use std::{str::FromStr, sync::Arc}; + + use anyhow::Error; + use graph::{ + components::link_resolver::LinkResolver, + data::subgraph::DeploymentHash, + prelude::{async_trait, serde_yaml, JsonValueStream, Link}, + slog::Logger, + }; + + use crate::{DataSource, Mapping, UnresolvedDataSource, UnresolvedMapping, DATASET_KIND}; + + #[test] + fn parse_data_source() { + let ds: UnresolvedDataSource = serde_yaml::from_str(TEMPLATE_DATA_SOURCE).unwrap(); + let expected = UnresolvedDataSource { + kind: DATASET_KIND.into(), + network: Some("mainnet".into()), + name: "Uniswap".into(), + source: crate::UnresolvedSource { + start_block: None, + dataset: "".to_string(), + }, + mapping: UnresolvedMapping { + api_version: "0.0.7".into(), + kind: "substreams/graph-entities".into(), + handler: "".to_string(), + file: Link { + link: "/ipfs/QmbHnhUFZa6qqqRyubUYhXntox1TCBxqryaBM1iNGqVJzT".into(), + }, + // abis: vec![], + }, + }; + assert_eq!(ds, expected); + } + + fn gen_data_source() -> DataSource { + DataSource { + kind: DATASET_KIND.into(), + network: Some("mainnet".into()), + name: "Uniswap".into(), + source: crate::Source { + dataset: DeploymentHash::new("QmcmBvMt1hbPTtPWaBk7HXwXx71tzAKfa2eZeyV2mpRBLQ") + .unwrap(), + }, + mapping: Mapping { + 
api_version: semver::Version::from_str("0.0.7").unwrap(), + kind: "substreams/graph-entities".into(), + handler: crate::MappingHandler { + handler: "".to_string(), + runtime: Arc::new(Vec::new()), + }, + abis: vec![], + }, + initial_block: None, + context: Arc::new(None), + } + } + + const TEMPLATE_DATA_SOURCE: &str = r#" + specVersion: 0.0.4 + description: Uniswap is a decentralized protocol for automated token exchange on Ethereum. + repository: https://github.com/Uniswap/uniswap-v3-subgraph + schema: + file: ./schema.graphql + dataSources: + - kind: dataset + name: blocks + network: mainnet + source: + dataset: QmSB1Vw3ZmNX7wwkbPoybK944fDKzLZ3KWLhjbeD9DwyVL + mapping: + kind: substreams/graph-entities + apiVersion: 0.0.7 + file: ./src/mappings/fast.ts + handler: handleBlock + "#; + + #[derive(Debug)] + struct NoopLinkResolver {} + + #[async_trait] + impl LinkResolver for NoopLinkResolver { + fn with_timeout(&self, _timeout: std::time::Duration) -> Box { + unimplemented!() + } + + fn with_retries(&self) -> Box { + unimplemented!() + } + + async fn cat(&self, _logger: &Logger, _link: &Link) -> Result, Error> { + todo!() + } + + async fn get_block(&self, _logger: &Logger, _link: &Link) -> Result, Error> { + unimplemented!() + } + + async fn json_stream( + &self, + _logger: &Logger, + _link: &Link, + ) -> Result { + unimplemented!() + } + } +} diff --git a/chain/dataset/src/lib.rs b/chain/dataset/src/lib.rs new file mode 100644 index 00000000000..db6d9d152a3 --- /dev/null +++ b/chain/dataset/src/lib.rs @@ -0,0 +1,11 @@ +mod chain; +mod data_source; +mod runtime; +mod trigger; + +pub mod mapper; + +pub use crate::chain::Chain; +pub use chain::*; +pub use data_source::*; +pub use trigger::*; diff --git a/chain/dataset/src/mapper.rs b/chain/dataset/src/mapper.rs new file mode 100644 index 00000000000..75f05c311f4 --- /dev/null +++ b/chain/dataset/src/mapper.rs @@ -0,0 +1,80 @@ +use anyhow::{anyhow, Error}; +use graph::blockchain::block_stream::{ + BlockStreamError, 
BlockStreamEvent, BlockStreamMapper, BlockWithTriggers, FirehoseCursor, +}; +use graph::blockchain::BlockTime; +use graph::prelude::BlockPtr; +use graph::prelude::{async_trait, BlockHash, BlockNumber, Logger}; +use graph::slog::error; +use graph::substreams::Clock; + +use crate::Chain; + +// WasmBlockMapper will not perform any transformation to the block and cannot make assumptions +// about the block format. This mode just works a passthrough from the block stream to the subgraph +// mapping which will do the decoding and store actions. +pub struct WasmBlockMapper { + pub handler: String, +} + +#[async_trait] +impl BlockStreamMapper<Chain> for WasmBlockMapper { + fn decode_block( + &self, + _output: Option<&[u8]>, + ) -> Result<Option<crate::Block>, BlockStreamError> { + unreachable!("WasmBlockMapper does not do block decoding") + } + + async fn block_with_triggers( + &self, + _logger: &Logger, + _block: crate::Block, + ) -> Result<BlockWithTriggers<Chain>, BlockStreamError> { + unreachable!("WasmBlockMapper does not do trigger decoding") + } + + async fn handle_substreams_block( + &self, + logger: &Logger, + clock: Clock, + cursor: FirehoseCursor, + block: Vec<u8>, + ) -> Result<BlockStreamEvent<Chain>, BlockStreamError> { + let Clock { + id, + number, + timestamp, + } = clock; + + let block_ptr = BlockPtr { + hash: BlockHash::from(id.into_bytes()), + number: BlockNumber::from(TryInto::<i32>::try_into(number).map_err(Error::from)?), + }; + + let block_data = block.into_boxed_slice(); + + // `timestamp` is an `Option`, but it should always be set + let timestamp = match timestamp { + None => { + error!(logger, + "Substream block is missing a timestamp"; + "cursor" => cursor.to_string(), + "number" => number, + ); + return Err(anyhow!( + "Substream block is missing a timestamp at cursor {cursor}, block number {number}" + )).map_err(BlockStreamError::from); + } + Some(ts) => BlockTime::since_epoch(ts.seconds, ts.nanos as u32), + }; + + Ok(BlockStreamEvent::ProcessWasmBlock( + block_ptr, + timestamp, + block_data, + self.handler.clone(), + cursor, + 
)) + } +} diff --git a/chain/dataset/src/runtime.rs b/chain/dataset/src/runtime.rs new file mode 100644 index 00000000000..a0809aa243e --- /dev/null +++ b/chain/dataset/src/runtime.rs @@ -0,0 +1,70 @@ +use std::sync::Arc; + +use anyhow::Error; +use graph::{ + blockchain::{self, HostFn}, + prelude::{CheapClone, EthereumCallCache}, +}; +use graph_chain_ethereum::{ + network::EthereumNetworkAdapters, + runtime::runtime_adapter::{eth_get_balance, ethereum_call}, + NodeCapabilities, +}; + +use crate::{Chain, DataSource}; + +pub struct RuntimeAdapter { + pub eth_adapters: Arc, + pub call_cache: Arc, +} + +impl blockchain::RuntimeAdapter for RuntimeAdapter { + fn host_fns(&self, ds: &DataSource) -> Result, Error> { + let abis = ds + .mapping + .abis + .iter() + .map(|mapping| { + Arc::new(graph_chain_ethereum::MappingABI::from( + mapping.as_ref().clone(), + )) + }) + .collect::>>(); + let call_cache = self.call_cache.cheap_clone(); + let eth_adapters = self.eth_adapters.cheap_clone(); + + let ethereum_call = HostFn { + name: "ethereum.call", + func: Arc::new(move |ctx, wasm_ptr| { + // Ethereum calls should prioritise call-only adapters if one is available. 
+ let eth_adapter = eth_adapters.call_or_cheapest(Some(&NodeCapabilities { + archive: true, + traces: false, + }))?; + ethereum_call( + &eth_adapter, + call_cache.cheap_clone(), + ctx, + wasm_ptr, + &abis, + None, + ) + .map(|ptr| ptr.wasm_ptr()) + }), + }; + + let eth_adapters = self.eth_adapters.cheap_clone(); + let ethereum_get_balance = HostFn { + name: "ethereum.getBalance", + func: Arc::new(move |ctx, wasm_ptr| { + let eth_adapter = eth_adapters.cheapest_with(&NodeCapabilities { + archive: true, + traces: false, + })?; + eth_get_balance(&eth_adapter, ctx, wasm_ptr).map(|ptr| ptr.wasm_ptr()) + }), + }; + + Ok(vec![ethereum_call, ethereum_get_balance]) + } +} diff --git a/chain/dataset/src/trigger.rs b/chain/dataset/src/trigger.rs new file mode 100644 index 00000000000..2c5c88af860 --- /dev/null +++ b/chain/dataset/src/trigger.rs @@ -0,0 +1,163 @@ +use std::sync::Arc; + +use anyhow::Error; +use graph::{ + blockchain::{ + self, block_stream::BlockWithTriggers, BlockPtr, EmptyNodeCapabilities, MappingTriggerTrait, + }, + components::{ + store::SubgraphFork, + subgraph::{MappingError, SharedProofOfIndexing}, + }, + data_source, + prelude::{anyhow, async_trait, BlockHash, BlockNumber, BlockState, RuntimeHostBuilder}, + slog::Logger, +}; +use graph_runtime_wasm::module::ToAscPtr; + +use crate::{Block, Chain, NoopDataSourceTemplate}; + +#[derive(Eq, PartialEq, PartialOrd, Ord, Debug)] +pub struct TriggerData {} + +impl MappingTriggerTrait for TriggerData { + fn error_context(&self) -> String { + "Failed to process substreams block".to_string() + } +} + +impl blockchain::TriggerData for TriggerData { + // TODO(filipe): Can this be improved with some data from the block? + fn error_context(&self) -> String { + "Failed to process substreams block".to_string() + } + + fn address_match(&self) -> Option<&[u8]> { + None + } +} + +impl ToAscPtr for TriggerData { + // substreams doesn't rely on wasm on the graph-node so this is not needed. 
+ fn to_asc_ptr( + self, + _heap: &mut H, + _gas: &graph::runtime::gas::GasCounter, + ) -> Result, graph::runtime::HostExportError> { + unimplemented!() + } +} + +#[derive(Debug, Clone, Default)] +pub struct TriggerFilter { + pub handler: Option, +} + +#[cfg(debug_assertions)] +impl TriggerFilter {} + +// TriggerFilter should bypass all triggers and just rely on block since all the data received +// should already have been processed. +impl blockchain::TriggerFilter for TriggerFilter { + fn extend_with_template(&mut self, _data_source: impl Iterator) { + } + + /// this function is not safe to call multiple times, only one DataSource is supported for + /// + fn extend<'a>( + &mut self, + mut data_sources: impl Iterator + Clone, + ) { + let Self { handler } = self; + + // Only one handler support, it has already been added. + if handler.is_some() { + return; + } + + if let Some(ds) = data_sources.next() { + *handler = Some(ds.mapping.handler.handler.clone()); + } + } + + fn node_capabilities(&self) -> EmptyNodeCapabilities { + EmptyNodeCapabilities::default() + } + + fn to_firehose_filter(self) -> Vec { + unimplemented!("this should never be called for this type") + } +} + +pub struct TriggersAdapter {} + +#[async_trait] +impl blockchain::TriggersAdapter for TriggersAdapter { + async fn ancestor_block( + &self, + _ptr: BlockPtr, + _offset: BlockNumber, + ) -> Result, Error> { + unimplemented!() + } + + async fn scan_triggers( + &self, + _from: BlockNumber, + _to: BlockNumber, + _filter: &TriggerFilter, + ) -> Result>, Error> { + unimplemented!() + } + + async fn triggers_in_block( + &self, + _logger: &Logger, + _block: Block, + _filter: &TriggerFilter, + ) -> Result, Error> { + unimplemented!() + } + + async fn is_on_main_chain(&self, _ptr: BlockPtr) -> Result { + unimplemented!() + } + + async fn parent_ptr(&self, block: &BlockPtr) -> Result, Error> { + // This seems to work for a lot of the firehose chains. 
+ Ok(Some(BlockPtr { + hash: BlockHash::from(vec![0xff; 32]), + number: block.number.saturating_sub(1), + })) + } +} + +pub struct TriggerProcessor {} + +impl TriggerProcessor { + pub fn new() -> Self { + Self {} + } +} + +#[async_trait] +impl graph::prelude::TriggerProcessor for TriggerProcessor +where + T: RuntimeHostBuilder, +{ + async fn process_trigger<'a>( + &'a self, + _logger: &Logger, + _: Box + Send + 'a>, + _block: &Arc, + _trigger: &data_source::TriggerData, + _state: BlockState, + _proof_of_indexing: &SharedProofOfIndexing, + _causality_region: &str, + _debug_fork: &Option>, + _subgraph_metrics: &Arc, + _instrument: bool, + ) -> Result { + unreachable!("datasets dont do trigger processing") + } +} diff --git a/chain/ethereum/src/chain.rs b/chain/ethereum/src/chain.rs index 0754e7e2146..28b7bd0228c 100644 --- a/chain/ethereum/src/chain.rs +++ b/chain/ethereum/src/chain.rs @@ -3,6 +3,7 @@ use anyhow::{Context, Error}; use graph::blockchain::client::ChainClient; use graph::blockchain::firehose_block_ingestor::{FirehoseBlockIngestor, Transforms}; use graph::blockchain::{BlockIngestor, BlockTime, BlockchainKind, TriggersAdapterSelector}; +use graph::components::metrics::stopwatch::StopwatchMetrics; use graph::components::store::DeploymentCursorTracker; use graph::data::subgraph::UnifiedMappingApiVersion; use graph::firehose::{FirehoseEndpoint, ForkStep}; @@ -90,7 +91,11 @@ impl BlockStreamBuilder for EthereumStreamBuilder { .subgraph_logger(&deployment) .new(o!("component" => "FirehoseBlockStream")); - let firehose_mapper = Arc::new(FirehoseMapper { adapter, filter }); + let firehose_mapper = Arc::new(FirehoseMapper { + adapter, + filter, + raw_block_output: true, + }); Ok(Box::new(FirehoseBlockStream::new( deployment.hash, @@ -371,6 +376,7 @@ impl Blockchain for Chain { start_blocks: Vec, filter: Arc, unified_api_version: UnifiedMappingApiVersion, + _stopwatch: StopwatchMetrics, ) -> Result>, Error> { let current_ptr = store.block_ptr(); match 
self.chain_client().as_ref() { @@ -739,6 +745,7 @@ impl TriggersAdapterTrait for TriggersAdapter { } pub struct FirehoseMapper { + raw_block_output: bool, adapter: Arc>, filter: Arc, } @@ -819,6 +826,17 @@ impl FirehoseMapperTrait for FirehoseMapper { use firehose::ForkStep::*; match step { StepNew => { + if self.raw_block_output { + return Ok(BlockStreamEvent::ProcessWasmBlock( + BlockPtr::new(block.hash(), block.number()), + BlockTime::NONE, + // TODO: can we consume this instead? + any_block.value.clone().into_boxed_slice(), + "".to_string(), + response.cursor.clone().into(), + )); + } + // unwrap: Input cannot be None so output will be error or block. let block = self.decode_block(Some(any_block.value.as_ref()))?.unwrap(); let block_with_triggers = self.block_with_triggers(logger, block).await?; diff --git a/chain/ethereum/src/runtime/runtime_adapter.rs b/chain/ethereum/src/runtime/runtime_adapter.rs index 868079402e7..1940af98ae9 100644 --- a/chain/ethereum/src/runtime/runtime_adapter.rs +++ b/chain/ethereum/src/runtime/runtime_adapter.rs @@ -115,7 +115,7 @@ impl blockchain::RuntimeAdapter for RuntimeAdapter { } /// function ethereum.call(call: SmartContractCall): Array | null -fn ethereum_call( +pub fn ethereum_call( eth_adapter: &EthereumAdapter, call_cache: Arc, ctx: HostFnCtx, @@ -151,7 +151,7 @@ fn ethereum_call( } } -fn eth_get_balance( +pub fn eth_get_balance( eth_adapter: &EthereumAdapter, ctx: HostFnCtx<'_>, wasm_ptr: u32, diff --git a/chain/near/src/chain.rs b/chain/near/src/chain.rs index 135350a1deb..4f316628e51 100644 --- a/chain/near/src/chain.rs +++ b/chain/near/src/chain.rs @@ -6,6 +6,7 @@ use graph::blockchain::{ BasicBlockchainBuilder, BlockIngestor, BlockchainBuilder, BlockchainKind, NoopRuntimeAdapter, }; use graph::cheap_clone::CheapClone; +use graph::components::metrics::stopwatch::StopwatchMetrics; use graph::components::store::DeploymentCursorTracker; use graph::data::subgraph::UnifiedMappingApiVersion; use graph::env::EnvVars; @@ 
-226,6 +227,7 @@ impl Blockchain for Chain { start_blocks: Vec, filter: Arc, unified_api_version: UnifiedMappingApiVersion, + _stopwatch: StopwatchMetrics, ) -> Result>, Error> { if self.prefer_substreams { return self diff --git a/chain/starknet/src/chain.rs b/chain/starknet/src/chain.rs index 060a502d80d..b901475b4c0 100644 --- a/chain/starknet/src/chain.rs +++ b/chain/starknet/src/chain.rs @@ -14,7 +14,10 @@ use graph::{ RuntimeAdapter as RuntimeAdapterTrait, }, cheap_clone::CheapClone, - components::store::{DeploymentCursorTracker, DeploymentLocator}, + components::{ + metrics::stopwatch::StopwatchMetrics, + store::{DeploymentCursorTracker, DeploymentLocator}, + }, data::subgraph::UnifiedMappingApiVersion, env::EnvVars, firehose::{self, FirehoseEndpoint, ForkStep}, @@ -110,6 +113,7 @@ impl Blockchain for Chain { start_blocks: Vec, filter: Arc, unified_api_version: UnifiedMappingApiVersion, + _stopwatch: StopwatchMetrics, ) -> Result>, Error> { self.block_stream_builder .build_firehose( diff --git a/chain/substreams/src/chain.rs b/chain/substreams/src/chain.rs index 1dad1764707..6dafa19c721 100644 --- a/chain/substreams/src/chain.rs +++ b/chain/substreams/src/chain.rs @@ -5,6 +5,7 @@ use graph::blockchain::client::ChainClient; use graph::blockchain::{ BasicBlockchainBuilder, BlockIngestor, BlockTime, EmptyNodeCapabilities, NoopRuntimeAdapter, }; +use graph::components::metrics::stopwatch::StopwatchMetrics; use graph::components::store::DeploymentCursorTracker; use graph::env::EnvVars; use graph::firehose::FirehoseEndpoints; @@ -136,6 +137,7 @@ impl Blockchain for Chain { _start_blocks: Vec, filter: Arc, _unified_api_version: UnifiedMappingApiVersion, + _stopwatch: StopwatchMetrics, ) -> Result>, Error> { self.block_stream_builder .build_substreams( diff --git a/core/Cargo.toml b/core/Cargo.toml index f2b73150f0a..521b4a9686b 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -18,13 +18,27 @@ graph-chain-near = { path = "../chain/near" } graph-chain-cosmos = { 
path = "../chain/cosmos" } graph-chain-substreams = { path = "../chain/substreams" } graph-chain-starknet = { path = "../chain/starknet" } +graph-chain-dataset = { path = "../chain/dataset" } graph-runtime-wasm = { path = "../runtime/wasm" } +lru_time_cache = "0.11" +semver = "1.0.21" +serde = "1.0" +serde_json = "1.0" serde_yaml = "0.9.21" # Switch to crates.io once tower 0.5 is released tower = { git = "https://github.com/tower-rs/tower.git", features = ["full"] } cid = "0.11.0" anyhow = "1.0" +# The version already used in the project is very old and upgrade is currently blocked. +once_cell = "1.18.0" +ethabi = "17.0" +substreams = "0.5.0" +substreams-ethereum = "0.8.0" + +uniswap = { path = "../transforms/uniswap" } +sled = "0.34.7" + [dev-dependencies] tower-test = { git = "https://github.com/tower-rs/tower.git" } ipfs-api-backend-hyper = "0.6" diff --git a/core/src/subgraph/context/instance/mod.rs b/core/src/subgraph/context/instance/mod.rs index 49e5cfd3c86..aab17d97248 100644 --- a/core/src/subgraph/context/instance/mod.rs +++ b/core/src/subgraph/context/instance/mod.rs @@ -64,20 +64,20 @@ where } pub fn new( - manifest: SubgraphManifest, + manifest: &SubgraphManifest, host_builder: T, host_metrics: Arc, causality_region_seq: CausalityRegionSeq, ) -> Self { let subgraph_id = manifest.id.clone(); let network = manifest.network_name(); - let templates = Arc::new(manifest.templates); + let templates = Arc::new(manifest.templates.clone()); SubgraphInstance { host_builder, subgraph_id, network, - static_data_sources: Arc::new(manifest.data_sources), + static_data_sources: Arc::new(manifest.data_sources.clone()), onchain_hosts: OnchainHosts::new(), offchain_hosts: OffchainHosts::new(), module_cache: HashMap::new(), diff --git a/core/src/subgraph/context/mod.rs b/core/src/subgraph/context/mod.rs index 6c87e095d1e..5076f7ffe4c 100644 --- a/core/src/subgraph/context/mod.rs +++ b/core/src/subgraph/context/mod.rs @@ -78,7 +78,7 @@ where impl> IndexingContext { pub fn 
new( - manifest: SubgraphManifest, + manifest: &SubgraphManifest, host_builder: T, host_metrics: Arc, causality_region_seq: CausalityRegionSeq, @@ -87,7 +87,7 @@ impl> IndexingContext { trigger_processor: Box>, ) -> Self { let instance = SubgraphInstance::new( - manifest, + &manifest, host_builder, host_metrics.clone(), causality_region_seq, diff --git a/core/src/subgraph/inputs.rs b/core/src/subgraph/inputs.rs index b2e95c753f5..e052ace91b0 100644 --- a/core/src/subgraph/inputs.rs +++ b/core/src/subgraph/inputs.rs @@ -1,7 +1,8 @@ use graph::{ blockchain::{Blockchain, TriggersAdapter}, + cheap_clone::CheapClone, components::{ - store::{DeploymentLocator, SubgraphFork, WritableStore}, + store::{DeploymentLocator, SubgraphFork, SubgraphStore, WritableStore}, subgraph::ProofOfIndexingVersion, }, data::subgraph::{SubgraphFeature, UnifiedMappingApiVersion}, @@ -12,6 +13,7 @@ use std::collections::BTreeSet; use std::sync::Arc; pub struct IndexingInputs { + pub dataset: Option, pub deployment: DeploymentLocator, pub features: BTreeSet, pub start_blocks: Vec, @@ -26,6 +28,7 @@ pub struct IndexingInputs { pub static_filters: bool, pub poi_version: ProofOfIndexingVersion, pub network: String, + pub subgraph_store: Arc, /// Whether to instrument trigger processing and log additional, /// possibly expensive and noisy, information @@ -50,6 +53,8 @@ impl IndexingInputs { poi_version, network, instrument, + subgraph_store, + dataset, } = self; IndexingInputs { deployment: deployment.clone(), @@ -67,6 +72,8 @@ impl IndexingInputs { poi_version: *poi_version, network: network.clone(), instrument: *instrument, + subgraph_store: subgraph_store.cheap_clone(), + dataset: dataset.clone(), } } } diff --git a/core/src/subgraph/instance_manager.rs b/core/src/subgraph/instance_manager.rs index d0176238013..6bd7f7db65c 100644 --- a/core/src/subgraph/instance_manager.rs +++ b/core/src/subgraph/instance_manager.rs @@ -5,9 +5,11 @@ use crate::subgraph::loader::load_dynamic_data_sources; use 
std::collections::BTreeSet; use crate::subgraph::runner::SubgraphRunner; +use anyhow::bail; use graph::blockchain::block_stream::BlockStreamMetrics; use graph::blockchain::{Blockchain, BlockchainKind, DataSource, NodeCapabilities}; use graph::components::metrics::gas::GasMetrics; +use graph::components::store::SubgraphSegment; use graph::components::subgraph::ProofOfIndexingVersion; use graph::data::subgraph::{UnresolvedSubgraphManifest, SPEC_VERSION_0_0_6}; use graph::data_source::causality_region::CausalityRegionSeq; @@ -50,7 +52,7 @@ impl SubgraphInstanceManagerTrait for SubgraphInstanceManager< let subgraph_start_future = async move { match BlockchainKind::from_manifest(&manifest)? { BlockchainKind::Arweave => { - let runner = instance_manager + let runner = instance_manager .build_subgraph_runner::( logger.clone(), self.env_vars.cheap_clone(), @@ -58,13 +60,14 @@ impl SubgraphInstanceManagerTrait for SubgraphInstanceManager< manifest, stop_block, Box::new(SubgraphTriggerProcessor {}), + None, ) .await?; self.start_subgraph_inner(logger, loc, runner).await } BlockchainKind::Ethereum => { - let runner = instance_manager + let runner = instance_manager .build_subgraph_runner::( logger.clone(), self.env_vars.cheap_clone(), @@ -72,13 +75,14 @@ impl SubgraphInstanceManagerTrait for SubgraphInstanceManager< manifest, stop_block, Box::new(SubgraphTriggerProcessor {}), + None, ) .await?; self.start_subgraph_inner(logger, loc, runner).await } BlockchainKind::Near => { - let runner = instance_manager + let runner = instance_manager .build_subgraph_runner::( logger.clone(), self.env_vars.cheap_clone(), @@ -86,13 +90,14 @@ impl SubgraphInstanceManagerTrait for SubgraphInstanceManager< manifest, stop_block, Box::new(SubgraphTriggerProcessor {}), + None, ) .await?; self.start_subgraph_inner(logger, loc, runner).await } BlockchainKind::Cosmos => { - let runner = instance_manager + let runner = instance_manager .build_subgraph_runner::( logger.clone(), 
self.env_vars.cheap_clone(), @@ -100,13 +105,14 @@ impl SubgraphInstanceManagerTrait for SubgraphInstanceManager< manifest, stop_block, Box::new(SubgraphTriggerProcessor {}), + None, ) .await?; self.start_subgraph_inner(logger, loc, runner).await } BlockchainKind::Substreams => { - let runner = instance_manager + let runner = instance_manager .build_subgraph_runner::( logger.clone(), self.env_vars.cheap_clone(), @@ -114,13 +120,14 @@ impl SubgraphInstanceManagerTrait for SubgraphInstanceManager< manifest, stop_block, Box::new(graph_chain_substreams::TriggerProcessor::new(loc.clone())), + None, ) .await?; self.start_subgraph_inner(logger, loc, runner).await } BlockchainKind::Starknet => { - let runner = instance_manager + let runner = instance_manager .build_subgraph_runner::( logger.clone(), self.env_vars.cheap_clone(), @@ -128,9 +135,47 @@ impl SubgraphInstanceManagerTrait for SubgraphInstanceManager< manifest, stop_block, Box::new(SubgraphTriggerProcessor {}), + None, + ) + .await?; + + self.start_subgraph_inner(logger, loc, runner).await + } + + BlockchainKind::Dataset => { + // HACK: find a better way to do this + let ds_loc = { + let manifest: UnresolvedSubgraphManifest = UnresolvedSubgraphManifest::parse(loc.hash.cheap_clone(), manifest.clone())?; + let ds: &graph_chain_dataset::UnresolvedDataSource = manifest + .data_sources + .first() + .expect("Datastores have at least 1 data set").as_onchain().unwrap(); + + match instance_manager + .subgraph_store + .active_locator(&ds.source.dataset)? { + Some(loc)=> loc, + None => + bail!("dataset {} not found. 
please ensure it has been deployed before start {}", ds.source.dataset, loc.hash), + } + + + }; + + + let runner = instance_manager + .build_subgraph_runner::( + logger.clone(), + self.env_vars.cheap_clone(), + loc.clone(), + manifest, + stop_block, + Box::new(graph_chain_dataset::TriggerProcessor {}), + Some(ds_loc), ) .await?; + self.start_subgraph_inner(logger, loc, runner).await } } @@ -208,6 +253,7 @@ impl SubgraphInstanceManager { manifest: serde_yaml::Mapping, stop_block: Option, tp: Box>>, + dataset: Option, ) -> anyhow::Result>> where C: Blockchain, @@ -273,6 +319,7 @@ impl SubgraphInstanceManager { .writable( logger.clone(), deployment.id, + SubgraphSegment::default(), Arc::new(manifest.template_idx_and_name().collect()), ) .await?; @@ -424,6 +471,8 @@ impl SubgraphInstanceManager { poi_version, network, instrument, + subgraph_store: self.subgraph_store.cheap_clone(), + dataset, }; // Initialize the indexing context, including both static and dynamic data sources. @@ -431,7 +480,7 @@ impl SubgraphInstanceManager { // multiple data sources. let ctx = { let mut ctx = IndexingContext::new( - manifest, + &manifest, host_builder, host_metrics.clone(), causality_region_seq, @@ -451,13 +500,9 @@ impl SubgraphInstanceManager { stream: block_stream_metrics, }; - Ok(SubgraphRunner::new( - inputs, - ctx, - logger.cheap_clone(), - metrics, - env_vars, - )) + let runner = SubgraphRunner::new(inputs, ctx, logger.cheap_clone(), metrics, env_vars); + + Ok(runner) } async fn start_subgraph_inner( diff --git a/core/src/subgraph/registrar.rs b/core/src/subgraph/registrar.rs index 9d4f5d7f4fa..9482a6893e2 100644 --- a/core/src/subgraph/registrar.rs +++ b/core/src/subgraph/registrar.rs @@ -424,6 +424,24 @@ where ) .await? 
} + BlockchainKind::Dataset => { + create_subgraph_version::( + &logger, + self.store.clone(), + self.chains.cheap_clone(), + name.clone(), + hash.cheap_clone(), + start_block_override, + graft_block_override, + raw, + node_id, + debug_fork, + self.version_switching_mode, + &self.resolver, + history_blocks, + ) + .await? + } }; debug!( diff --git a/core/src/subgraph/runner.rs b/core/src/subgraph/runner.rs index b8cb5b5951b..2170bea02e0 100644 --- a/core/src/subgraph/runner.rs +++ b/core/src/subgraph/runner.rs @@ -23,11 +23,14 @@ use graph::data_source::{ offchain, CausalityRegion, DataSource, DataSourceCreationError, TriggerData, }; use graph::env::EnvVars; +use graph::indexer::store::PostgresIndexerDB; +use graph::indexer::{IndexWorker, IndexerContext}; use graph::prelude::*; use graph::schema::EntityKey; use graph::util::{backoff::ExponentialBackoff, lfu_cache::LfuCache}; use std::sync::Arc; use std::time::{Duration, Instant}; +use uniswap::UniswapTransform; const MINUTE: Duration = Duration::from_secs(60); @@ -197,11 +200,21 @@ where loop { debug!(self.logger, "Starting or restarting subgraph"); - let block_stream_canceler = CancelGuard::new(); - let block_stream_cancel_handle = block_stream_canceler.handle(); // TriggerFilter needs to be rebuilt eveytime the blockstream is restarted self.ctx.filter = Some(self.build_filter()); + if self.inputs.deployment.hash.to_string() + == "QmcmBvMt1hbPTtPWaBk7HXwXx71tzAKfa2eZeyV2mpRBLQ" + { + self.populate_dataset().await; + // Stop for now + self.inputs.store.flush().await?; + return Ok(self); + } + + let block_stream_canceler = CancelGuard::new(); + let block_stream_cancel_handle = block_stream_canceler.handle(); + let mut block_stream = new_block_stream( &self.inputs, self.ctx.filter.as_ref().unwrap(), // Safe to unwrap as we just called `build_filter` in the previous line @@ -267,6 +280,45 @@ where } } + async fn populate_dataset(&mut self) { + let store = Arc::new( + PostgresIndexerDB::new( + 
self.inputs.subgraph_store.cheap_clone(), + self.inputs.deployment.cheap_clone(), + self.logger.cheap_clone(), + self.metrics.subgraph.stopwatch.cheap_clone(), + ) + .await, + ); + + let ctx = Arc::new(IndexerContext { + chain: self.inputs.chain.clone(), + transform: Arc::new(UniswapTransform::new()), + store, + deployment: self.inputs.deployment.clone(), + logger: self.logger.cheap_clone(), + }); + + let iw = IndexWorker {}; + + let earlier = Instant::now(); + iw.run_many( + ctx, + self.inputs.store.clone(), + *self.inputs.start_blocks.iter().min().unwrap(), + // Some(13369621), + None, + Arc::new(self.ctx.filter.as_ref().unwrap().clone()), + self.inputs.unified_api_version.clone(), + 10, + self.metrics.stream.stopwatch.cheap_clone(), + ) + .await + .unwrap(); + let diff = Instant::now().duration_since(earlier); + println!("### All tasks finished: took {}s ###", diff.as_secs()); + } + /// Processes a block and returns the updated context and a boolean flag indicating /// whether new dynamic data sources have been added to the subgraph. 
async fn process_block( diff --git a/core/src/subgraph/stream.rs b/core/src/subgraph/stream.rs index c1d767e3fcf..f7d314a4535 100644 --- a/core/src/subgraph/stream.rs +++ b/core/src/subgraph/stream.rs @@ -1,7 +1,7 @@ use crate::subgraph::inputs::IndexingInputs; use anyhow::bail; use graph::blockchain::block_stream::{BlockStream, BufferedBlockStream}; -use graph::blockchain::Blockchain; +use graph::blockchain::{Blockchain, BlockchainKind}; use graph::prelude::{CheapClone, Error, SubgraphInstanceMetrics}; use std::sync::Arc; @@ -12,14 +12,22 @@ pub async fn new_block_stream( ) -> Result>, Error> { let is_firehose = inputs.chain.chain_client().is_firehose(); + // HACK: Need to find a better way for this + let deployment = if C::KIND == BlockchainKind::Dataset { + inputs.dataset.clone().unwrap() + } else { + inputs.deployment.clone() + }; + match inputs .chain .new_block_stream( - inputs.deployment.clone(), + deployment, inputs.store.cheap_clone(), inputs.start_blocks.clone(), Arc::new(filter.clone()), inputs.unified_api_version.clone(), + metrics.stopwatch.cheap_clone(), ) .await { diff --git a/graph/Cargo.toml b/graph/Cargo.toml index c26073ce062..289b9bff9f7 100644 --- a/graph/Cargo.toml +++ b/graph/Cargo.toml @@ -89,6 +89,9 @@ sqlparser = "0.43.1" csv = "1.3.0" object_store = { version = "0.9.1", features = ["gcp"] } +sled = "0.34.7" +borsh = { version = "1.3.1", features = ["derive"] } + [dev-dependencies] clap = { version = "3.2.25", features = ["derive", "env"] } maplit = "1.0.2" diff --git a/graph/src/blockchain/mock.rs b/graph/src/blockchain/mock.rs index 1319dd5f054..a8b5af8fd02 100644 --- a/graph/src/blockchain/mock.rs +++ b/graph/src/blockchain/mock.rs @@ -1,6 +1,7 @@ use crate::{ components::{ link_resolver::LinkResolver, + metrics::stopwatch::StopwatchMetrics, store::{BlockNumber, DeploymentCursorTracker, DeploymentLocator}, subgraph::InstanceDSTemplateInfo, }, @@ -341,6 +342,7 @@ impl Blockchain for MockBlockchain { _start_blocks: Vec, _filter: Arc, 
_unified_api_version: UnifiedMappingApiVersion, + _stopwatch: StopwatchMetrics, ) -> Result>, Error> { todo!() } diff --git a/graph/src/blockchain/mod.rs b/graph/src/blockchain/mod.rs index c4ec226ad2f..a2e107527b2 100644 --- a/graph/src/blockchain/mod.rs +++ b/graph/src/blockchain/mod.rs @@ -18,6 +18,7 @@ mod types; use crate::{ cheap_clone::CheapClone, components::{ + metrics::stopwatch::StopwatchMetrics, store::{DeploymentCursorTracker, DeploymentLocator, StoredDynamicDataSource}, subgraph::HostMetrics, subgraph::InstanceDSTemplateInfo, @@ -182,6 +183,7 @@ pub trait Blockchain: Debug + Sized + Send + Sync + Unpin + 'static { start_blocks: Vec, filter: Arc, unified_api_version: UnifiedMappingApiVersion, + stopwatch: StopwatchMetrics, ) -> Result>, Error>; fn chain_store(&self) -> Arc; @@ -421,6 +423,8 @@ pub enum BlockchainKind { Substreams, Starknet, + + Dataset, } impl fmt::Display for BlockchainKind { @@ -432,6 +436,7 @@ impl fmt::Display for BlockchainKind { BlockchainKind::Cosmos => "cosmos", BlockchainKind::Substreams => "substreams", BlockchainKind::Starknet => "starknet", + BlockchainKind::Dataset => "dataset", }; write!(f, "{}", value) } @@ -448,6 +453,7 @@ impl FromStr for BlockchainKind { "cosmos" => Ok(BlockchainKind::Cosmos), "substreams" => Ok(BlockchainKind::Substreams), "starknet" => Ok(BlockchainKind::Starknet), + "dataset" => Ok(BlockchainKind::Dataset), _ => Err(anyhow!("unknown blockchain kind {}", s)), } } diff --git a/graph/src/components/store/mod.rs b/graph/src/components/store/mod.rs index 5ec11c69d7b..fb5de96f2d8 100644 --- a/graph/src/components/store/mod.rs +++ b/graph/src/components/store/mod.rs @@ -3,6 +3,7 @@ mod err; mod traits; pub mod write; +use diesel_derives::Queryable; pub use entity_cache::{EntityCache, EntityLfuCache, GetScope, ModificationsAndCache}; use futures03::future::{FutureExt, TryFutureExt}; use slog::{trace, Logger}; @@ -868,6 +869,71 @@ impl DeploymentId { } } +#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, 
Default)] +pub struct SubgraphSegmentId(pub i32); + +/// A segment refers to a deployment block range. It is used to limit the scope of the store +/// components to the specific range. This is used for parallel operations targetting the same +/// deployment. The use of `DeploymentSegment::default()` maintains the current behaviour of +/// full access to the entire DeploymentStore. +#[derive(Clone, Debug)] +pub enum SubgraphSegment { + AllBlocks, + Range(SegmentDetails), +} + +impl Default for SubgraphSegment { + fn default() -> Self { + Self::AllBlocks + } +} + +impl SubgraphSegment { + pub fn id(&self) -> Option { + match self { + SubgraphSegment::AllBlocks => None, + SubgraphSegment::Range(details) => Some(details.id), + } + } + + pub fn details(&self) -> Option<&SegmentDetails> { + match self { + SubgraphSegment::AllBlocks => None, + SubgraphSegment::Range(d) => Some(d), + } + } + + pub fn stop_block(&self) -> Option { + match self { + SubgraphSegment::AllBlocks => None, + SubgraphSegment::Range(details) => Some(details.stop_block), + } + } + + pub fn is_complete(&self) -> bool { + match self { + Self::Range(details) => details.is_complete(), + _ => false, + } + } +} + +/// Each segment represents a block range within a subgraph. End block is exclusive. 
+#[derive(Clone, Debug, Queryable)] +pub struct SegmentDetails { + pub id: SubgraphSegmentId, + pub deployment: DeploymentId, + pub start_block: BlockNumber, + pub stop_block: BlockNumber, + pub current_block: Option, +} + +impl SegmentDetails { + pub fn is_complete(&self) -> bool { + self.current_block.unwrap_or_default() == self.stop_block - 1 + } +} + /// A unique identifier for a deployment that specifies both its external /// identifier (`hash`) and its unique internal identifier (`id`) which /// ensures we are talking about a unique location for the deployment's data diff --git a/graph/src/components/store/traits.rs b/graph/src/components/store/traits.rs index d7023eeb8c3..b0f118607c9 100644 --- a/graph/src/components/store/traits.rs +++ b/graph/src/components/store/traits.rs @@ -1,3 +1,5 @@ +use std::ops::Range; + use anyhow::Error; use async_trait::async_trait; use web3::types::{Address, H256}; @@ -175,10 +177,15 @@ pub trait SubgraphStore: Send + Sync + 'static { /// /// The `manifest_idx_and_name` lists the correspondence between data /// source or template position in the manifest and name. + /// + /// The `Segment` restricts the write operations to a range of blocks, + /// this means that several writables can exist at one time and they will only + /// write to the block range specified by the segment. async fn writable( self: Arc, logger: Logger, deployment: DeploymentId, + segment: SubgraphSegment, manifest_idx_and_name: Arc>, ) -> Result, StoreError>; @@ -364,6 +371,26 @@ pub trait WritableStore: ReadStore + DeploymentCursorTracker { /// The maximum assigned causality region. Any higher number is therefore free to be assigned. async fn causality_region_curr_val(&self) -> Result, StoreError>; + /// Persists a set of segments according to the provided details. If called more than once + /// this should be a NOOP. 
+ async fn create_segments( + &self, + deployment: DeploymentId, + segments: Vec, + ) -> Result, StoreError>; + + async fn get_segments( + &self, + deployment: DeploymentId, + ) -> Result, StoreError>; + + /// Forces `current_block` to `stop_block`-1. Passing an already complete + /// segment should be a NOOP. + async fn mark_subgraph_segment_complete( + &self, + segment: SegmentDetails, + ) -> Result<(), StoreError>; + /// Report the name of the shard in which the subgraph is stored. This /// should only be used for reporting and monitoring fn shard(&self) -> &str; diff --git a/graph/src/data/subgraph/mod.rs b/graph/src/data/subgraph/mod.rs index 4563f5862eb..f860a162446 100644 --- a/graph/src/data/subgraph/mod.rs +++ b/graph/src/data/subgraph/mod.rs @@ -543,7 +543,7 @@ impl IntoValue for DeploymentFeatures { } } -#[derive(Debug, Deserialize)] +#[derive(Debug, Deserialize, Clone)] #[serde(rename_all = "camelCase")] pub struct BaseSubgraphManifest { pub id: DeploymentHash, @@ -562,7 +562,7 @@ pub struct BaseSubgraphManifest { pub indexer_hints: Option, } -#[derive(Debug, Deserialize)] +#[derive(Debug, Deserialize, Clone)] #[serde(rename_all = "camelCase")] pub struct IndexerHints { prune: Option, @@ -577,7 +577,7 @@ impl IndexerHints { } } -#[derive(Debug)] +#[derive(Debug, Clone)] pub enum Prune { Auto, Never, diff --git a/graph/src/data_source/mod.rs b/graph/src/data_source/mod.rs index 2c982146c16..bcae4277592 100644 --- a/graph/src/data_source/mod.rs +++ b/graph/src/data_source/mod.rs @@ -258,6 +258,12 @@ pub enum UnresolvedDataSource { } impl UnresolvedDataSource { + pub fn as_onchain(&self) -> Option<&C::UnresolvedDataSource> { + match self { + UnresolvedDataSource::Onchain(ds) => Some(ds), + UnresolvedDataSource::Offchain(_) => None, + } + } pub async fn resolve( self, resolver: &Arc, diff --git a/graph/src/env/mod.rs b/graph/src/env/mod.rs index 9e2b41c75be..02fcaa55b12 100644 --- a/graph/src/env/mod.rs +++ b/graph/src/env/mod.rs @@ -199,10 +199,10 @@ pub 
struct EnvVars { /// The amount of history to keep when using 'min' historyBlocks /// in the manifest pub min_history_blocks: BlockNumber, - /// Set by the env var `dips_metrics_object_store_url` /// The name of the object store bucket to store DIPS metrics pub dips_metrics_object_store_url: Option, + pub dataset_subgraphs_enabled: bool, } impl EnvVars { @@ -273,6 +273,7 @@ impl EnvVars { .min_history_blocks .unwrap_or(2 * inner.reorg_threshold), dips_metrics_object_store_url: inner.dips_metrics_object_store_url, + dataset_subgraphs_enabled: inner.experimental_dataset_subgraphs_enabled.0, }) } @@ -357,6 +358,8 @@ struct Inner { log_levels: Option, #[envconfig(from = "EXPERIMENTAL_STATIC_FILTERS", default = "false")] experimental_static_filters: EnvVarBoolean, + #[envconfig(from = "EXPERIMENTAL_DATASET_SUBGRAPHS_ENABLED", default = "true")] + experimental_dataset_subgraphs_enabled: EnvVarBoolean, #[envconfig( from = "EXPERIMENTAL_SUBGRAPH_VERSION_SWITCHING_MODE", default = "instant" diff --git a/graph/src/indexer/README.md b/graph/src/indexer/README.md new file mode 100644 index 00000000000..ace8a7c662c --- /dev/null +++ b/graph/src/indexer/README.md @@ -0,0 +1,75 @@ +# Pre-Indexer for subgraphs + +## Design +The pre indexer will traverse all the blocks, according to some filters which are currently defined +per chain. For each block it will run the mappings with the block and a state kv as input and store +the resulting triggers for the block, these will later be used as input for subgraphs instead of +raw blockchain data. + +By exposing the state, it will allow users to maintain their own logic for handling dynamic data sources +as well as any derived data. + +The state is expected to be returned after every block and passed on to the next. The state will +not be available for querying and only the latest version is kept between blocks and it will be +limited in size, through a mechanism we will defined later on. 
+ +If state is not used then all the processing will happen in parallel until it reaches the chain head. + +## State +State refers to intermediate state between blocks (think the state for fold operations). It is only +queryable from the pre-indexer, subgraphs and graphql don't have access to it. + +In order to support reverts, it is necessary to be able to retrieve a previous state. State should +store a log of delta operations as well as a snapshot every TBD amount of blocks. Retrieving an old state +will be possible by getting the latest snapshot and applying the delta operations between that block and +the block it is needed at. + +This state is necessary so that users can keep track of things like created contracts on ethereum. + +State is indexed by a string key and an optional tag and will store a Vec. This means that anything +stored in the state should ideally use a serializable binary format like borsh or protobuf. + +The key is designed to be an ID or unique value and tag helps query items by tag. As an example: + +``` + store.set("123", "token", ...) + store.set("321", "token", ...) + store.get_all("token") // Should yield both the previous values. +``` + +## Processing +The pre-indexer will iterate over all the blocks coming from firehose/substreams, this means it is +possible to apply filters to the incoming data so that the processing is quicker. The main note +about the processing is that if state is not used, the entire block space being scanned can be +partitioned and handled in parallel. + +## Parallel Processing +The worker will calculate the range between the last stable block (if present) and the chain head +minus the Reorg threshold. For a given number of workers, each worker will get a range starting the +oldest stable block or start block. + +As confirmation of completion arrives from the older block ranges the last stable block is updated +and the db is flushed to ensure data is written to disk before continuing. 
+ +### Last Stable Block +Last Stable block is the property that can be observed from subgraphs block streams in order to know +if the data that follows is ready for processing so it acts as a barrier to protect from consuming +state that is still in flight. + +### Recovery +In case of a failure, only blocks before LSB are considered valid and the rest will be overwritten +by running the same process again. + +### Cancellation +If an error occurs within one of the ranges, the error should propagate to the orchestration function +which should cancel all the ranges as soon as possible. + +## Store +The store is a mapping of BlockNumber to the list of triggers for that block, where the order will be +preserved. + +## Transformations +The transformations is similar to the mappings on subgraphs, they provide the code that performs the +data extraction from blocks. Transformations take as input the previous state and the block, returning +the new state and a list of encoded triggers, that is, the values to be stored for the processed block, +which are later used as inputs for subgraphs. diff --git a/graph/src/indexer/TODO.md b/graph/src/indexer/TODO.md new file mode 100644 index 00000000000..934339a4a2c --- /dev/null +++ b/graph/src/indexer/TODO.md @@ -0,0 +1,45 @@ +# TODO + +- [] SledStore + - [x] snapshot store + - [x] state read + - [] tests +- [] sequential worker +- [] paralell worker + - [x] handle ack + - [x] update head + - [] error handling + - [x] set latest stable block + - [] set latest block as soon as it is ready + - [] handle reverts +- [x] State + - [x] Delta rebuild + - [x] set get +- [] patch Blockstream + - [x] passthrough vec + - [] firehose mapper + - [] remove clone +- [] Uniswap toy example +- [] perf tests + - [x] consume all the blocks in parallel, empty transform + - [x] block stream from kv to run subgraph + - [x] minimal uniswap example using new architecture +- [] binary to check data? +- [] test different stores + - [] postgres? 
+ - [] rocksdb + - [x] sleddb +- [] dataset blockstream + - [] try to avoid cloning the handler every block +- [] cleanup println and unwrap +- [] Runner + - [] pass deployment hash + - [] runtime adapter needs to be added for the original chain +- [] DataSet Blockchain + - [] How to get the ethereum/others network_names and configs? + - [] Fix datasource hack +- [] Store + - [] Make it not forward only + - [] Introduce partitions? + - [] Get all keys + - [] Write entity update diff --git a/graph/src/indexer/block_stream.rs b/graph/src/indexer/block_stream.rs new file mode 100644 index 00000000000..9e59828ca44 --- /dev/null +++ b/graph/src/indexer/block_stream.rs @@ -0,0 +1,399 @@ +use crate::blockchain::block_stream::{ + BlockStream, BlockStreamError, BlockStreamEvent, FirehoseCursor, +}; +use crate::blockchain::{BlockTime, Blockchain}; +use crate::components::store::{SegmentDetails, SubgraphSegment}; +use crate::prelude::*; +use crate::util::backoff::ExponentialBackoff; +use async_stream::try_stream; +use futures03::{Stream, StreamExt}; +use std::ops::Range; +use std::sync::Arc; +use std::task::{Context, Poll}; +use std::time::{Duration, Instant}; +use tokio::sync::mpsc; + +use super::IndexerStore; + +pub const INDEXER_STREAM_BUFFER_STREAM_SIZE: usize = 10; + +pub struct IndexerBlockStream { + //fixme: not sure if this is ok to be set as public, maybe + // we do not want to expose the stream to the caller + stream: Pin, BlockStreamError>> + Send>>, +} + +impl IndexerBlockStream +where + C: Blockchain, +{ + pub fn new( + from: DeploymentHash, + store: Arc, + subgraph_current_block: Option, + start_blocks: Vec, + end_blocks: Vec, + logger: Logger, + handler: String, + registry: Arc, + ) -> Self { + let metrics = IndexerStreamMetrics::new(registry, from.clone()); + + let start_block = start_blocks.iter().min(); + let start_block = match (subgraph_current_block, start_block) { + (None, None) => 0, + (None, Some(i)) => *i, + (Some(ptr), _) => ptr.number, + }; + + 
IndexerBlockStream { + stream: Box::pin(stream_blocks( + from, + store, + logger, + handler, + metrics, + start_block, + end_blocks.iter().min().map(|n| *n), + )), + } + } +} + +fn stream_blocks( + from: DeploymentHash, + store: Arc, + logger: Logger, + handler: String, + _metrics: IndexerStreamMetrics, + start_block: BlockNumber, + stop_block: Option, +) -> impl Stream, BlockStreamError>> { + // Back off exponentially whenever we encounter a connection error or a stream with bad data + let _backoff = ExponentialBackoff::new(Duration::from_millis(500), Duration::from_secs(45)); + let stop_block = stop_block.unwrap_or(i32::MAX); + + // This attribute is needed because `try_stream!` seems to break detection of `skip_backoff` assignments + #[allow(unused_assignments)] + let mut skip_backoff = false; + + try_stream! { + let logger = logger.new(o!("from_deployment" => from.clone())); + + loop { + info!( + &logger, + "IndexerStream starting"; + "start_block" => start_block, + "stop_block" => stop_block, + ); + + // We just reconnected, assume that we want to back off on errors + skip_backoff = false; + + let (tx, mut rx) = mpsc::channel(100); + let store = store.cheap_clone(); + let handle = crate::spawn(async move { + store.stream_from(start_block, tx).await + }); + + + info!(&logger, "IndexerStream started"); + + // Track the time it takes to set up the block stream + // metrics.observe_successful_connection(&mut connect_start, &endpoint.provider); + + let mut last = 0; + loop { + let response = rx.recv().await; + match response { + None => { + debug!(&logger, "channel has been closed"); + break; + }, + Some((block_ptr, triggers)) => { + if block_ptr.number>last+1000 { + debug!(&logger,"block_ptr: {}", block_ptr); + last = block_ptr.number; + } + yield BlockStreamEvent::ProcessWasmBlock(block_ptr, BlockTime::NONE, triggers.0, handler.clone(), FirehoseCursor::None); + } + } + + } + match handle.await { + Ok(_) => {debug!(&logger, "restart...")}, + Err(err) => 
debug!(&logger, "IndexerStream error, restarting. error: {:?}", err), + }; + } + } +} + +impl Stream for IndexerBlockStream { + type Item = Result, BlockStreamError>; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + self.stream.poll_next_unpin(cx) + } +} + +impl BlockStream for IndexerBlockStream { + fn buffer_size_hint(&self) -> usize { + INDEXER_STREAM_BUFFER_STREAM_SIZE + } +} + +struct IndexerStreamMetrics { + deployment: DeploymentHash, + restarts: CounterVec, + connect_duration: GaugeVec, + time_between_responses: HistogramVec, + responses: CounterVec, +} + +impl IndexerStreamMetrics { + pub fn new(registry: Arc, deployment: DeploymentHash) -> Self { + Self { + deployment, + restarts: registry + .global_counter_vec( + "deployment_substreams_blockstream_restarts", + "Counts the number of times a Substreams block stream is (re)started", + vec!["deployment", "provider", "success"].as_slice(), + ) + .unwrap(), + + connect_duration: registry + .global_gauge_vec( + "deployment_substreams_blockstream_connect_duration", + "Measures the time it takes to connect a Substreams block stream", + vec!["deployment", "provider"].as_slice(), + ) + .unwrap(), + + time_between_responses: registry + .global_histogram_vec( + "deployment_substreams_blockstream_time_between_responses", + "Measures the time between receiving and processing Substreams stream responses", + vec!["deployment", "provider"].as_slice(), + ) + .unwrap(), + + responses: registry + .global_counter_vec( + "deployment_substreams_blockstream_responses", + "Counts the number of responses received from a Substreams block stream", + vec!["deployment", "provider", "kind"].as_slice(), + ) + .unwrap(), + } + } + + fn observe_successful_connection(&self, time: &mut Instant, provider: &str) { + self.restarts + .with_label_values(&[&self.deployment, &provider, "true"]) + .inc(); + self.connect_duration + .with_label_values(&[&self.deployment, &provider]) + .set(time.elapsed().as_secs_f64()); + 
+ // Reset last connection timestamp + *time = Instant::now(); + } + + fn observe_failed_connection(&self, time: &mut Instant, provider: &str) { + self.restarts + .with_label_values(&[&self.deployment, &provider, "false"]) + .inc(); + self.connect_duration + .with_label_values(&[&self.deployment, &provider]) + .set(time.elapsed().as_secs_f64()); + + // Reset last connection timestamp + *time = Instant::now(); + } + + fn observe_response(&self, kind: &str, time: &mut Instant, provider: &str) { + self.time_between_responses + .with_label_values(&[&self.deployment, &provider]) + .observe(time.elapsed().as_secs_f64()); + self.responses + .with_label_values(&[&self.deployment, &provider, kind]) + .inc(); + + // Reset last response timestamp + *time = Instant::now(); + } +} + +/// Gets the next range of blocks that is ready to stream +/// Returns the range that includes the start block if one is provided. +/// If start block is provided it will return the range [start_block,`current_block`[ +/// wthin the relevant segment. +/// If start block is None, the same range of the lowest segment is returned. +/// When the `current_block` of the segment is lower or eql the provided start_block then (start_block, start_block) +/// is returned indicating there are no new blocks for processing. 
+async fn next_segment_block_range( + segments: &Vec, + start_block: Option, + range_size: i32, +) -> Result>, StoreError> { + // Start block will be included in the range + fn take_n_blocks( + segments: &Vec, + start_block: BlockNumber, + n: i32, + ) -> Option> { + let mut stop_block = start_block; + + for segment in segments { + let is_complete = segment.is_complete(); + let starts_after_this_segment = start_block >= segment.stop_block; + + match (is_complete, starts_after_this_segment) { + (true, true) => continue, + // [start_block, stop_block] + next segment + (true, false) => { + stop_block = segment.stop_block; + + let size = stop_block - start_block; + if size >= n { + return Some(start_block..start_block + n); + } + + continue; + } + (false, true) => return None, + // last segment we can process + (false, false) => { + stop_block = match segment.current_block { + // at this point either this is the first segment and stop_block == start_block + // or a previous segment has been included. 
+ Some(curr) if curr > stop_block => curr, + _ => stop_block, + }; + + if start_block == stop_block { + return None; + } + + break; + } + } + } + + let size = stop_block - start_block; + if size >= n { + return Some(start_block..start_block + n); + } + + Some(start_block..stop_block) + } + + if segments.is_empty() { + return Ok(None); + } + + let start_block = match start_block { + Some(sb) => sb, + None => { + return Ok(take_n_blocks( + &segments, + segments.first().unwrap().start_block, + range_size, + )) + } + }; + + if segments.last().unwrap().stop_block >= start_block { + return Ok(Some(start_block..i32::MAX)); + } + + Ok(take_n_blocks(&segments, start_block, range_size)) +} + +#[cfg(test)] +mod test { + use std::ops::Range; + + use crate::components::store::{BlockNumber, DeploymentId, SegmentDetails}; + + #[ignore] + #[tokio::test] + async fn next_segment_block_range() { + struct Case<'a> { + name: &'a str, + segments: Vec<(BlockNumber, BlockNumber, Option)>, + start_block: Option, + range_size: i32, + result: Option>, + } + + let cases = vec![ + Case { + name: "no segments", + segments: vec![], + start_block: todo!(), + range_size: todo!(), + result: todo!(), + }, + Case { + name: "none start block", + segments: vec![], + start_block: todo!(), + range_size: todo!(), + result: todo!(), + }, + Case { + name: "available blocks in segments shorter than range", + segments: vec![], + start_block: todo!(), + range_size: todo!(), + result: todo!(), + }, + Case { + name: "no more blocks in segments", + segments: vec![], + start_block: todo!(), + range_size: todo!(), + result: todo!(), + }, + Case { + name: "segments no completed", + segments: vec![], + start_block: todo!(), + range_size: todo!(), + result: todo!(), + }, + ]; + + for case in cases.into_iter() { + let Case { + name, + segments, + start_block, + range_size, + result, + } = case; + let segments = segments + .into_iter() + .map(|(start, stop, current)| SegmentDetails { + id: 
crate::components::store::SubgraphSegmentId(0), + deployment: DeploymentId(0), + start_block: start, + stop_block: stop, + current_block: current, + }) + .collect(); + + let range = super::next_segment_block_range(&segments, start_block, range_size) + .await + .unwrap(); + assert_eq!( + range, result, + "case failed: {}. Expected {:?} and got {:?}", + name, result, range + ); + } + } +} diff --git a/graph/src/indexer/mod.rs b/graph/src/indexer/mod.rs new file mode 100644 index 00000000000..1d8c194da71 --- /dev/null +++ b/graph/src/indexer/mod.rs @@ -0,0 +1,499 @@ +use std::time::Duration; +use std::{collections::HashMap, pin::Pin, sync::Arc}; + +use crate::blockchain::block_stream::BlockStreamError; +use crate::blockchain::BlockPtr; +use crate::components::metrics::stopwatch::StopwatchMetrics; +use crate::components::store::{ + DeploymentId, SegmentDetails, SubgraphSegment, SubgraphSegmentId, WritableStore, +}; +use crate::util::backoff::ExponentialBackoff; +use crate::util::futures::retry; +use crate::{ + blockchain::{ + block_stream::{BlockStreamEvent, FirehoseCursor}, + Blockchain, + }, + components::store::{DeploymentCursorTracker, DeploymentLocator}, + data::subgraph::UnifiedMappingApiVersion, + itertools::Itertools, + prelude::{BlockNumber, CheapClone, ENV_VARS}, + schema::InputSchema, +}; +use anyhow::{anyhow, Error, Result}; +use async_trait::async_trait; +use borsh::{BorshDeserialize, BorshSerialize}; +use futures03::{Stream, StreamExt}; +use slog::Logger; +use tokio::{sync::mpsc, time::Instant}; + +pub mod block_stream; +pub mod store; + +pub type Item = Box<[u8]>; + +#[derive(Clone, BorshSerialize, BorshDeserialize)] +pub struct StateDelta { + delta: Vec, +} + +#[derive(BorshSerialize, BorshDeserialize, Clone)] +enum StateOperation { + Set(Key, Item), + Unset(Key, Item), +} + +// TODO: Maybe this should be a type defined by the store so it can have more efficient representation +// for each store implementation. 
+#[derive(Default, BorshSerialize, BorshDeserialize)] +pub struct State { + items: HashMap, Item>, + tags: HashMap>>, + deltas: Vec, +} + +impl State { + pub fn delta(&self) -> StateDelta { + StateDelta { + delta: self.deltas.clone(), + } + } +} + +#[derive(BorshSerialize, BorshDeserialize, Hash, Clone)] +pub struct Key { + pub id: Box<[u8]>, + pub tag: Option, +} + +impl State { + pub fn set_encode(&mut self, key: Key, item: B) -> Result<()> { + self.set(key, borsh::to_vec(&item)?) + } + + pub fn set(&mut self, _key: Key, _item: impl AsRef<[u8]>) -> Result<()> { + unimplemented!(); + } + pub fn get(&mut self, _key: Key) -> Result> { + unimplemented!() + } + pub fn get_keys(&mut self, tag: &'static str) -> Result> { + let keys = self + .tags + .get(tag) + .unwrap_or(&vec![]) + .into_iter() + .map(|k| Key { + id: k.clone(), + // This is not ideal but the derive macro only works for String, will look into this later + tag: Some(tag.to_string()), + }) + .collect_vec(); + + Ok(keys) + } + + pub fn apply(&mut self, _delta: StateDelta) { + todo!() + } +} + +pub struct EncodedBlock(pub Box<[u8]>); +pub struct EncodedTriggers(pub Box<[u8]>); +pub type BlockSender = mpsc::Sender<(BlockPtr, EncodedTriggers)>; + +pub struct TriggerMap(HashMap); + +#[async_trait] +/// Indexer store is the store where the triggers will be kept to be processed by subgraphs +/// later. The indexer store will be used to populate several logical segments of a dataset, +/// therefore it can not assume to know the full state of the underlying storage at any time. +pub trait IndexerStore: Sync + Send { + /// Last Stable Block (LSB) is the last block the rest of the system can use + /// for streaming, copying, whatever else. + async fn get_last_stable_block(&self) -> Result>; + /// Stream from will send all relevant blocks starting with bn inclusively up to + /// LSB, forever. + async fn stream_from(&self, bn: BlockNumber, bs: BlockSender) -> Result<()>; + /// Get the triggers for a specific block. 
+ async fn get(&self, bn: BlockNumber, s: SubgraphSegment) -> Result>; + /// Set the triggers for a specific block. Set can be called in parallel for different + /// segments of the Indexer store, therefore, set can assume it will be forward-only within + /// a segment but not on the entirety of the data. + async fn set( + &self, + bn: BlockPtr, + s: &SubgraphSegment, + state: &State, + triggers: EncodedTriggers, + ) -> Result<()>; + /// Get state is currently not implemented and will prolly be removed. + async fn get_state(&self, bn: BlockNumber) -> Result; + /// Sets the latest block up to witch data can be consumed. + async fn set_last_stable_block(&self, segment: SubgraphSegment, bn: BlockNumber) -> Result<()>; + /// Get segments if set + async fn get_segments(&self) -> Result>; + /// Create segments if none exist otherwise should be a noop. + async fn set_segments( + &self, + segments: Vec<(BlockNumber, BlockNumber)>, + ) -> Result>; +} + +/// BlockTransform the specific transformation to apply to every block, one of the implemtnations +/// will be the WASM mapping. +pub trait BlockTransform: Clone + Sync + Send { + fn transform(&self, block: EncodedBlock, state: State) -> (State, EncodedTriggers); +} + +/// IndexerContext will provide all inputs necessary for the processing +pub struct IndexerContext { + pub chain: Arc, + pub transform: Arc, + pub store: Arc, + pub deployment: DeploymentLocator, + pub logger: Logger, +} + +impl IndexerContext {} + +#[derive(Clone, Debug)] +struct IndexerCursorTracker { + schema: InputSchema, + start_block: BlockNumber, + stop_block: Option, + firehose_cursor: FirehoseCursor, +} + +impl DeploymentCursorTracker for IndexerCursorTracker { + fn input_schema(&self) -> crate::schema::InputSchema { + self.schema.cheap_clone() + } + + fn block_ptr(&self) -> Option { + None + } + + fn firehose_cursor(&self) -> FirehoseCursor { + FirehoseCursor::None + } +} + +/// Split the work in similar sized ranges, the end_block is non inclusive. 
+fn calculate_segments( + start_block: BlockNumber, + stop_block: BlockNumber, + workers: i16, +) -> Vec<(BlockNumber, BlockNumber)> { + let total = stop_block - start_block; + let chunk_size = (total / workers as i32) + 1; + let mut res = vec![]; + + let mut start = start_block; + loop { + if start >= stop_block { + break; + } + let end = (start + chunk_size).min(stop_block); + res.push((start, end)); + start = end; + } + + res +} + +/// The IndexWorker glues all of the other types together and will manage the lifecycle +/// of the pre-indexing. +#[derive(Clone, Debug)] +pub struct IndexWorker {} + +impl IndexWorker { + async fn get_or_create_segments( + &self, + store: &Arc, + start_block: BlockNumber, + stop_block: BlockNumber, + workers: i16, + ) -> Result> + where + S: IndexerStore + 'static, + { + let segments = store.get_segments().await?; + if !segments.is_empty() { + return Ok(segments); + } + + let segments = calculate_segments(start_block, stop_block, workers); + + store + .set_segments(segments) + .await + .map_err(anyhow::Error::from) + } + + /// Breaks the finite range into segments. Schedules each chunk to a worker tokio task, + /// this work is IO bound for the most so this should be fine. The upper limit of the + /// range is going to be chain_head - REORG_THRESHOLD, this range will be processed in parallel + /// until it catches up to the head - reorg threshold. Afterwards it needs to switch to linear + /// processing so it can correctly handle reverts. If the start_block is within + /// 2*REORG_THRESHOLD of head already then linear should be used to avoid constantly swapping + /// between the two if the chain is fast enough. 
+ pub async fn run_many( + &self, + ctx: Arc>, + cursor_tracker: impl DeploymentCursorTracker, + start_block: BlockNumber, + stop_block: Option, + filter: Arc, + api_version: UnifiedMappingApiVersion, + workers: i16, + stopwatch: StopwatchMetrics, + ) -> Result<()> + where + B: Blockchain + 'static, + T: BlockTransform + 'static, + S: IndexerStore + 'static, + { + let chain_store = ctx.chain.chain_store(); + let chain_head = forever_async(&ctx.logger, "get chain head", || { + let chain_store = chain_store.cheap_clone(); + async move { + chain_store + .chain_head_ptr() + .await + .and_then(|r| r.ok_or(anyhow!("Expected chain head to exist"))) + } + }) + .await?; + + let chain_head = chain_head.block_number() - ENV_VARS.reorg_threshold; + let stop_block = match stop_block { + Some(stop_block) => stop_block.min(chain_head), + None => chain_head, + }; + + let segments = self + .get_or_create_segments(&ctx.store, start_block, stop_block, workers) + .await?; + + let mut handles = vec![]; + for (i, segment) in segments.iter().enumerate() { + if segment.is_complete() { + continue; + } + // Handle this more gracefully, if AllBlocks is here then we should just stop run_many + // and switch to linear processing. + let details = segment.details().unwrap(); + let cursor_tracker = IndexerCursorTracker { + schema: cursor_tracker.input_schema(), + start_block: details.start_block, + stop_block: Some(details.stop_block), + firehose_cursor: FirehoseCursor::None, + }; + + let filter = filter.cheap_clone(); + let api_version = api_version.clone(); + let ctx = ctx.cheap_clone(); + let segment = segment.clone(); + let stopwatch = stopwatch.cheap_clone(); + handles.push(crate::spawn(async move { + let now = Instant::now(); + let r = Self::run( + ctx.cheap_clone(), + &segment, + cursor_tracker, + filter, + api_version, + stopwatch, + ) + .await; + if r.is_ok() { + // uwnrap: all segments at this point should be finite. 
+ ctx.store + .set_last_stable_block(segment.clone(), segment.stop_block().unwrap() - 1) + .await?; + } + let end = Instant::now().duration_since(now).as_secs(); + println!("### task{} finished (took {}s)", i, end); + r + })); + } + + futures03::future::try_join_all(handles) + .await + .unwrap() + .into_iter() + .collect::, Error>>() + .unwrap(); + + Ok(()) + } + + /// Performs the indexing work forever, or until the stop_block is reached. Run will + /// start a new block_stream for the chain. + async fn run( + ctx: Arc>, + segment: &SubgraphSegment, + mut cursor_tracker: IndexerCursorTracker, + filter: Arc, + api_version: UnifiedMappingApiVersion, + stopwatch: StopwatchMetrics, + ) -> Result + where + B: Blockchain, + T: BlockTransform, + S: IndexerStore, + { + let block_stream = ctx + .chain + .new_block_stream( + ctx.deployment.clone(), + cursor_tracker.clone(), + vec![cursor_tracker.start_block], + filter, + api_version, + stopwatch, + ) + .await?; + + let cursor = + Self::process_stream(ctx, State::default(), Box::pin(block_stream), segment).await?; + cursor_tracker.firehose_cursor = cursor; + + Ok(cursor_tracker) + } + + /// Processes the stream until it ends or stop_block is reached. The stop_block is not + /// processed, once it's reached the previous cursor should be returned. 
+ async fn process_stream( + ctx: Arc>, + initial_state: State, + mut stream: Pin, BlockStreamError>>>>, + segment: &SubgraphSegment, + ) -> Result + where + B: Blockchain, + T: BlockTransform, + S: IndexerStore, + { + let mut firehose_cursor = FirehoseCursor::None; + let mut previous_state = initial_state; + let stop_block = match segment { + SubgraphSegment::AllBlocks => None, + SubgraphSegment::Range(details) => Some(details.stop_block), + }; + + loop { + let evt = stream.next().await; + + let cursor = match evt { + Some(Ok(BlockStreamEvent::ProcessWasmBlock( + block_ptr, + _block_time, + data, + _handler, + cursor, + ))) => { + if let Some(stop_block) = stop_block { + if block_ptr.number >= stop_block { + break; + } + } + + let (state, triggers) = ctx + .transform + .transform(EncodedBlock(data), std::mem::take(&mut previous_state)); + previous_state = state; + ctx.store + .set(block_ptr, &segment, &previous_state, triggers) + .await?; + + cursor + } + + Some(Ok(BlockStreamEvent::ProcessBlock(_block, _cursor))) => { + unreachable!("Process block not implemented yet") + } + Some(Ok(BlockStreamEvent::Revert(revert_to_ptr, cursor))) => { + println!("Revert detected to block {}", revert_to_ptr); + + cursor + } + Some(Err(e)) => return Err(e.into()), + + None => { + println!("### done!"); + break; + } + }; + + firehose_cursor = cursor; + } + + Ok(firehose_cursor) + } +} + +#[cfg(test)] +mod tests { + use crate::components::store::BlockNumber; + + #[test] + fn calculate_segments() { + #[derive(Debug, Clone)] + struct Case { + start: BlockNumber, + end: BlockNumber, + workers: i16, + result: Vec<(i32, i32)>, + } + + let cases = vec![ + Case { + start: 1, + end: 1000, + workers: 1, + result: vec![(1, 1000)], + }, + Case { + start: 1, + end: 1000, + workers: 2, + result: vec![(1, 501), (501, 1000)], + }, + ]; + + for case in cases.into_iter() { + let Case { + start, + end, + workers, + result, + } = case.clone(); + + let res = super::calculate_segments(start, end, 
workers); + assert_eq!(result, res, "{:?}", case); + } + } +} + +// TODO: Re-use something +const BACKOFF_BASE: Duration = Duration::from_millis(100); +const BACKOFF_CEIL: Duration = Duration::from_secs(10); + +async fn forever_async(logger: &Logger, op: &str, f: F) -> Result +where + F: Fn() -> Fut, + Fut: std::future::Future>, +{ + let mut backoff = ExponentialBackoff::new(BACKOFF_BASE, BACKOFF_CEIL); + loop { + match f().await { + Ok(v) => return Ok(v), + Err(e) => { + slog::error!(&logger, "Failed to {}, retrying...\nerror: {:?}", op, e) + } + } + backoff.sleep_async().await; + } +} diff --git a/graph/src/indexer/store/mod.rs b/graph/src/indexer/store/mod.rs new file mode 100644 index 00000000000..d0f15d32702 --- /dev/null +++ b/graph/src/indexer/store/mod.rs @@ -0,0 +1,5 @@ +mod postgres; +mod sled; + +pub use postgres::*; +pub use sled::*; diff --git a/graph/src/indexer/store/postgres.rs b/graph/src/indexer/store/postgres.rs new file mode 100644 index 00000000000..73fa6db81c7 --- /dev/null +++ b/graph/src/indexer/store/postgres.rs @@ -0,0 +1,425 @@ +use std::collections::{BTreeSet, HashMap}; +use std::ops::Range; +use std::sync::Arc; +use std::time::Duration; + +use async_trait::async_trait; +use slog::Logger; + +use crate::anyhow::Result; +use crate::blockchain::block_stream::FirehoseCursor; +use crate::blockchain::{BlockHash, BlockPtr, BlockTime}; +use crate::cheap_clone::CheapClone; +use crate::components::metrics::stopwatch::StopwatchMetrics; +use crate::components::store::write::EntityModification; +use crate::components::store::{ + DeploymentLocator, SegmentDetails, SubgraphSegment, SubgraphSegmentId, SubgraphStore, + WritableStore, +}; +use crate::data::store::scalar::Bytes; +use crate::data::store::Id; +use crate::data::subgraph::LATEST_VERSION; +use crate::data::value::Word; +use crate::indexer::{BlockSender, EncodedTriggers, State}; +use crate::prelude::Value; +use crate::schema::{EntityKey, InputSchema}; +use crate::{components::store::BlockNumber, 
indexer::IndexerStore}; + +const SCHEMA: &str = " +type Trigger @entity(immutable: true) { + id: ID! + number: Int! + hash: Bytes! + data: Bytes! +}"; + +pub struct PostgresIndexerDB { + store: Arc, + global_writable: Arc, + deployment: DeploymentLocator, + logger: Logger, + metrics: StopwatchMetrics, + schema: InputSchema, +} + +impl PostgresIndexerDB { + pub async fn new( + store: Arc, + deployment: DeploymentLocator, + logger: Logger, + metrics: StopwatchMetrics, + ) -> Self { + let schema = InputSchema::parse(LATEST_VERSION, SCHEMA, deployment.hash.clone()).unwrap(); + let global_writable = store + .cheap_clone() + .writable( + logger.cheap_clone(), + deployment.id, + SubgraphSegment::AllBlocks, + Arc::new(vec![]), + ) + .await + .unwrap(); + + Self { + store, + deployment, + logger, + metrics, + schema, + global_writable, + } + } +} + +#[async_trait] +impl IndexerStore for PostgresIndexerDB { + async fn get_segments(&self) -> Result> { + self.global_writable + .get_segments(self.deployment.id) + .await + .map_err(anyhow::Error::from) + } + + async fn set_segments( + &self, + segments: Vec<(BlockNumber, BlockNumber)>, + ) -> Result> { + let segments = segments + .into_iter() + .map(|(start, end)| SegmentDetails { + // This will be auto generated + id: SubgraphSegmentId(0), + deployment: self.deployment.id, + start_block: start, + stop_block: end, + current_block: None, + }) + .collect(); + + self.global_writable + .create_segments(self.deployment.id, segments) + .await + .map_err(anyhow::Error::from) + } + + async fn get_last_stable_block(&self) -> Result> { + Ok(self.global_writable.block_ptr().map(|b| b.block_number())) + } + + async fn stream_from(&self, bn: BlockNumber, bs: BlockSender) -> Result<()> { + let entity_type = self.schema.entity_type("Trigger").unwrap(); + let mut next_start_block = bn; + loop { + let segments = self + .global_writable + .get_segments(self.deployment.id) + .await? 
+ .into_iter() + .map(|s| { + assert!(matches!(s, SubgraphSegment::Range(_))); + s.details().unwrap().clone() + }) + .collect(); + let range = next_segment_block_range(&segments, Some(next_start_block), 500) + .unwrap() + .unwrap(); + println!("## range: {:?}", range); + + let last_block = range.end; + // when there are no blocks to consume start=end so this should be a noop. + let keys: BTreeSet = range + .into_iter() + .map(|block_number| { + entity_type.key(Id::String(Word::from(block_number.to_string()))) + }) + // .map(|block_number| entity_type.key(Id::Int8(block_number as i64))) + .collect(); + + if keys.is_empty() { + tokio::time::sleep(Duration::from_millis(500)).await; + continue; + } + + // println!("## keys: {:?}", keys); + let blocks = self.global_writable.get_many(keys.clone())?; + // println!("## blocks: {:?}", blocks); + + for key in keys { + let block = match blocks.get(&key) { + Some(block) => block, + None => continue, + }; + let ptr = match block.get("hash").unwrap() { + Value::Bytes(bs) => { + let hash = BlockHash(bs.as_slice().to_vec().into_boxed_slice()); + + BlockPtr { hash, number: bn } + } + _ => unreachable!(), + }; + let trigger = match block.get("data").unwrap() { + Value::Bytes(bs) => EncodedTriggers(bs.as_slice().to_vec().into_boxed_slice()), + _ => unreachable!(), + }; + + bs.send((ptr, trigger)).await?; + } + next_start_block = last_block; + } + } + async fn get(&self, _bn: BlockNumber, _s: SubgraphSegment) -> Result> { + unimplemented!() + } + async fn set( + &self, + bn: BlockPtr, + s: &SubgraphSegment, + state: &State, + triggers: EncodedTriggers, + ) -> Result<()> { + let writable = self + .store + .cheap_clone() + .writable( + self.logger.cheap_clone(), + self.deployment.id, + s.clone(), + Arc::new(vec![]), + ) + .await?; + let data: HashMap = HashMap::from_iter(vec![ + (Word::from("id"), Value::Int8(bn.number as i64)), + ( + Word::from("hash"), + Value::Bytes(Bytes::from(bn.hash.0.as_ref())), + ), + (Word::from("number"), 
Value::Int(bn.number)), + ( + Word::from("data"), + Value::Bytes(Bytes::from(triggers.0.as_ref())), + ), + ]); + + let entity_type = self.schema.entity_type("Trigger").unwrap(); + let entity = self.schema.make_entity(data).unwrap(); + + let entity = EntityModification::Insert { + key: entity_type.key(Id::String(bn.number.to_string().into())), + data: Arc::new(entity), + block: bn.number, + end: None, + }; + + writable + .transact_block_operations( + bn, + BlockTime::NONE, + FirehoseCursor::None, + vec![entity], + &self.metrics, + vec![], + vec![], + vec![], + false, + false, + ) + .await?; + + Ok(()) + } + + async fn get_state(&self, _bn: BlockNumber) -> Result { + unimplemented!() + } + async fn set_last_stable_block(&self, segment: SubgraphSegment, bn: BlockNumber) -> Result<()> { + let details = match segment { + SubgraphSegment::AllBlocks => unreachable!(), + SubgraphSegment::Range(details) => details, + }; + let stop_block = details.stop_block; + assert_eq!(bn, stop_block - 1); + + self.global_writable + .mark_subgraph_segment_complete(details) + .await + .map_err(anyhow::Error::from) + } +} + +/// Gets the next range of blocks that is ready to stream +/// Returns the range that includes the start block if one is provided. +/// If start block is provided it will return the range [start_block,`current_block`[ +/// wthin the relevant segment. +/// If start block is None, the same range of the lowest segment is returned. +/// When the `current_block` of the segment is lower or eql the provided start_block then (start_block, start_block) +/// is returned indicating there are no new blocks for processing. 
+fn next_segment_block_range( + segments: &Vec, + start_block: Option, + range_size: i32, +) -> anyhow::Result>> { + // Start block will be included in the range + fn take_n_blocks( + segments: &Vec, + start_block: BlockNumber, + n: i32, + ) -> Option> { + let mut stop_block = start_block; + let min_start_block = segments + .iter() + .map(|s| s.start_block) + .min() + .unwrap_or_default(); + let start_block = start_block.max(min_start_block); + + for segment in segments { + let is_complete = segment.is_complete(); + let starts_after_this_segment = start_block >= segment.stop_block; + + match (is_complete, starts_after_this_segment) { + (true, true) => continue, + // [start_block, stop_block] + next segment + (true, false) => { + stop_block = segment.stop_block; + + let size = stop_block - start_block; + if size >= n { + return Some(start_block..start_block + n); + } + + continue; + } + (false, true) => return None, + // last segment we can process + (false, false) => { + stop_block = match segment.current_block { + // at this point either this is the first segment and stop_block == start_block + // or a previous segment has been included. 
+ Some(curr) if curr > stop_block => curr, + _ => stop_block, + }; + + if start_block == stop_block { + return None; + } + + break; + } + } + } + + let size = stop_block - start_block; + if size >= n { + return Some(start_block..start_block + n); + } + + Some(start_block..stop_block) + } + + if segments.is_empty() { + return Ok(None); + } + + let start_block = match start_block { + Some(sb) => sb, + None => { + return Ok(take_n_blocks( + &segments, + segments.first().unwrap().start_block, + range_size, + )) + } + }; + + if segments.last().unwrap().stop_block <= start_block { + return Ok(Some(start_block..i32::MAX)); + } + + Ok(take_n_blocks(&segments, start_block, range_size)) +} + +#[cfg(test)] +mod test { + use std::ops::Range; + + use crate::components::store::{BlockNumber, DeploymentId, SegmentDetails}; + + #[ignore] + #[test] + fn next_segment_block_range() { + struct Case<'a> { + name: &'a str, + segments: Vec<(BlockNumber, BlockNumber, Option)>, + start_block: Option, + range_size: i32, + result: Option>, + } + + let cases = vec![ + Case { + name: "no segments", + segments: vec![], + start_block: todo!(), + range_size: todo!(), + result: todo!(), + }, + Case { + name: "none start block", + segments: vec![], + start_block: todo!(), + range_size: todo!(), + result: todo!(), + }, + Case { + name: "available blocks in segments shorter than range", + segments: vec![], + start_block: todo!(), + range_size: todo!(), + result: todo!(), + }, + Case { + name: "no more blocks in segments", + segments: vec![], + start_block: todo!(), + range_size: todo!(), + result: todo!(), + }, + Case { + name: "segments no completed", + segments: vec![], + start_block: todo!(), + range_size: todo!(), + result: todo!(), + }, + ]; + + for case in cases.into_iter() { + let Case { + name, + segments, + start_block, + range_size, + result, + } = case; + let segments: Vec = segments + .into_iter() + .map(|(start, stop, current)| SegmentDetails { + id: 
crate::components::store::SubgraphSegmentId(0), + deployment: DeploymentId(0), + start_block: start, + stop_block: stop, + current_block: current, + }) + .collect(); + + let range = + super::next_segment_block_range(&segments, start_block, range_size).unwrap(); + assert_eq!( + range, result, + "case failed: {}. Expected {:?} and got {:?}", + name, result, range + ); + } + } +} diff --git a/graph/src/indexer/store/sled.rs b/graph/src/indexer/store/sled.rs new file mode 100644 index 00000000000..43e48324ce3 --- /dev/null +++ b/graph/src/indexer/store/sled.rs @@ -0,0 +1,282 @@ +use std::sync::Arc; + +use crate::blockchain::BlockHash; +use crate::components::store::SubgraphSegment; +use crate::tokio::sync::RwLock; +use crate::{blockchain::BlockPtr, prelude::BlockNumber}; +use anyhow::{Error, Result}; +use async_trait::async_trait; +use borsh::{BorshDeserialize, BorshSerialize}; +use sled::{Db, Tree}; +use thiserror::Error; + +use crate::indexer::{BlockSender, EncodedTriggers, IndexerStore, State, StateDelta}; +pub const DB_NAME: &str = "/media/data/sled_indexer_db"; +pub const STATE_SNAPSHOT_FREQUENCY: u32 = 1000; +pub const TRIGGER_PREFIX: &str = "trigger_"; + +#[derive(BorshSerialize, BorshDeserialize)] +struct Value { + block_hash: Box<[u8]>, + data: Box<[u8]>, +} + +/// How frequently do we want state to be fully stored. +/// Never will prevent snapshots, this likely means state is not being used. 
+#[derive(Clone)] +pub enum StateSnapshotFrequency { + Never, + Blocks(u32), +} + +impl Default for StateSnapshotFrequency { + fn default() -> Self { + Self::Blocks(STATE_SNAPSHOT_FREQUENCY) + } +} + +#[derive(Debug, Error)] +pub enum SledStoreError { + #[error("A last stable block is required for this operation")] + LastStableBlockRequired, + #[error("sled returned an error: {0}")] + SledError(#[from] sled::Error), +} + +struct StoreInner { + last_state_snapshot: Option, +} + +#[derive(Clone)] +pub struct SledIndexerStore { + tree: Tree, + // Keeping interior mutability for now because of there is a chance we need to share the store + // and the IndexingContext would definitely be shared + inner: Arc>, + snapshot_frequency: StateSnapshotFrequency, +} + +impl SledIndexerStore { + pub fn new( + db: Arc, + tree_name: &str, + snapshot_frequency: StateSnapshotFrequency, + ) -> Result { + let tree = db.open_tree(tree_name).map_err(SledStoreError::from)?; + let last_state_snapshot = tree + .get(Self::latest_snapshot_key()) + .map_err(SledStoreError::from)? 
+ .map(|v| BlockNumber::from_le_bytes(v.to_vec().try_into().unwrap())); + + let inner = Arc::new(RwLock::new(StoreInner { + last_state_snapshot, + })); + + Ok(Self { + tree, + inner, + snapshot_frequency, + }) + } + + pub fn state_delta_key(bn: BlockNumber) -> String { + format!("state_delta_{}", bn) + } + pub fn trigger_key(bn: BlockNumber) -> String { + format!("{}{}", TRIGGER_PREFIX, bn) + } + pub fn snapshot_key(bn: BlockNumber) -> String { + format!("state_snapshot_{}", bn) + } + pub fn latest_snapshot_key() -> String { + "latest_snapshot".to_string() + } + + pub fn last_stable_block_key() -> String { + "last_stable_block".to_string() + } + + async fn should_snapshot(&self, bn: BlockNumber) -> bool { + use StateSnapshotFrequency::*; + + let freq = match self.snapshot_frequency { + Never => return false, + Blocks(blocks) => blocks, + }; + + bn - self.inner.read().await.last_state_snapshot.unwrap_or(0) > freq.try_into().unwrap() + } +} + +#[async_trait] +impl IndexerStore for SledIndexerStore { + async fn get_segments(&self) -> Result> { + unimplemented!(); + } + async fn set_segments(&self, _segments: Vec<(i32, i32)>) -> Result> { + unimplemented!(); + } + async fn get(&self, bn: BlockNumber, _s: SubgraphSegment) -> Result> { + let res = match self + .tree + .get(bn.to_string()) + .map_err(SledStoreError::from)? 
+ { + None => None, + Some(v) => Value::try_from_slice(v.as_ref()) + .map(|v| EncodedTriggers(v.data)) + .map(Some)?, + }; + + Ok(res) + } + async fn set_last_stable_block(&self, _: SubgraphSegment, bn: BlockNumber) -> Result<()> { + self.tree + .insert( + SledIndexerStore::last_stable_block_key().as_str(), + bn.to_le_bytes().to_vec(), + ) + .map_err(Error::from)?; + + self.tree + .flush_async() + .await + .map(|_| ()) + .map_err(Error::from) + } + + async fn set( + &self, + ptr: BlockPtr, + _s: &SubgraphSegment, + state: &State, + triggers: EncodedTriggers, + ) -> Result<()> { + let BlockPtr { hash, number: bn } = ptr; + let should_snapshot = self.should_snapshot(bn).await; + + let v = Value { + block_hash: hash.0, + data: triggers.0, + }; + let v = borsh::to_vec(&v)?; + self.tree.transaction(move |tx_db| { + use sled::transaction::ConflictableTransactionError::*; + + tx_db + .insert( + SledIndexerStore::state_delta_key(bn).as_str(), + borsh::to_vec(&state.delta()).unwrap(), + ) + .map_err(Abort)?; + + tx_db + .insert(SledIndexerStore::trigger_key(bn).as_str(), &*v) + .map_err(Abort)?; + + if should_snapshot { + tx_db + .insert( + SledIndexerStore::snapshot_key(bn).as_str(), + borsh::to_vec(&state).unwrap(), + ) + .map_err(Abort)?; + + tx_db + .insert( + SledIndexerStore::latest_snapshot_key().as_str(), + bn.to_le_bytes().to_vec(), + ) + .map_err(Abort)?; + } + + Ok(()) + })?; + + // This should always be a NOOP is state is not in use. + if should_snapshot { + self.inner.write().await.last_state_snapshot = Some(bn); + } + + Ok(()) + } + + async fn get_state(&self, bn: BlockNumber) -> Result { + let block = match self.inner.read().await.last_state_snapshot { + None => return Ok(State::default()), + Some(block) => block, + }; + + let snapshot = self + .tree + .get(SledIndexerStore::snapshot_key(block)) + .map_err(SledStoreError::from)? 
+ .expect("last_state_snapshot doesn't match the marker"); + let mut state = State::try_from_slice(&snapshot.as_ref()).unwrap(); + + self.tree + .range( + SledIndexerStore::state_delta_key(block + 1)..SledIndexerStore::state_delta_key(bn), + ) + .collect::, _>>()? + .iter() + .for_each(|(_, v)| { + let delta = StateDelta::try_from_slice(v.as_ref()).unwrap(); + + state.apply(delta); + }); + + Ok(state) + } + async fn get_last_stable_block(&self) -> Result> { + let b = self + .tree + .get(Self::last_stable_block_key())? + .map(|ivec| i32::from_le_bytes(ivec.as_ref().try_into().unwrap())); + + Ok(b) + } + + async fn stream_from(&self, bn: BlockNumber, sender: BlockSender) -> Result<()> { + let last = self + .get_last_stable_block() + .await? + .ok_or(SledStoreError::LastStableBlockRequired)?; + + let mut iter = self + .tree + .range(Self::trigger_key(bn)..Self::trigger_key(last)); + + loop { + let next_trigger = iter.next(); + let bs: (BlockPtr, EncodedTriggers) = match next_trigger { + Some(Ok((key, value))) => { + let block = + key.subslice(TRIGGER_PREFIX.len(), key.len() - TRIGGER_PREFIX.len()); + let block: i32 = String::from_utf8_lossy(block.as_ref()).parse().unwrap(); + + let value = Value::try_from_slice(value.as_ref())?; + let trigger = EncodedTriggers(value.data); + let block = BlockPtr { + hash: BlockHash(value.block_hash), + number: block, + }; + + (block, trigger) + } + None => break, + _ => unreachable!(), + }; + + match sender.send(bs).await { + Ok(()) => {} + Err(_) => { + println!("sender dropped, stream ending"); + break; + } + } + } + + Ok(()) + } +} diff --git a/graph/src/lib.rs b/graph/src/lib.rs index 8ab0c90dbd7..64f65af86dc 100644 --- a/graph/src/lib.rs +++ b/graph/src/lib.rs @@ -37,6 +37,8 @@ pub mod schema; /// Helpers for parsing environment variables. pub mod env; +pub mod indexer; + /// Wrapper for spawning tasks that abort on panic, which is our default. 
mod task_spawn; pub use task_spawn::{ diff --git a/node/Cargo.toml b/node/Cargo.toml index 7fe8b879984..2a26ceda5f8 100644 --- a/node/Cargo.toml +++ b/node/Cargo.toml @@ -12,6 +12,10 @@ path = "src/main.rs" name = "graphman" path = "src/bin/manager.rs" +[[bin]] +name = "indexer" +path = "src/bin/indexer.rs" + [dependencies] clap = { version = "3.2.25", features = ["derive", "env"] } env_logger = "0.10.1" @@ -28,6 +32,7 @@ graph-chain-near = { path = "../chain/near" } graph-chain-cosmos = { path = "../chain/cosmos" } graph-chain-substreams = { path = "../chain/substreams" } graph-chain-starknet = { path = "../chain/starknet" } +graph-chain-dataset = { path = "../chain/dataset" } graph-graphql = { path = "../graphql" } graph-server-http = { path = "../server/http" } graph-server-index-node = { path = "../server/index-node" } @@ -42,3 +47,7 @@ diesel = { workspace = true } http = "0.2.5" # must be compatible with the version rust-web3 uses prometheus = { version = "0.13.3", features = ["push"] } json-structural-diff = { version = "0.1", features = ["colorize"] } + +sled = "0.34.7" +borsh = { version = "1.3.1", features = ["derive"] } +uniswap = { path = "../transforms/uniswap" } diff --git a/node/src/bin/indexer.rs b/node/src/bin/indexer.rs new file mode 100644 index 00000000000..b2d3c05a328 --- /dev/null +++ b/node/src/bin/indexer.rs @@ -0,0 +1,89 @@ +use std::sync::Arc; + +use graph::{ + anyhow::Result, + blockchain::{block_stream::BlockStreamEvent, mock::MockBlockchain}, + indexer::{ + block_stream::IndexerBlockStream, + store::{SledIndexerStore, DB_NAME}, + }, + prelude::{prost::Message, DeploymentHash, MetricsRegistry}, + slog::info, + tokio::{self, time::Instant}, + tokio_stream::StreamExt, +}; +use uniswap::proto::edgeandnode::uniswap::v1::{Event, EventType, Events}; + +#[tokio::main] +pub async fn main() -> Result<()> { + let deployment: &str = "QmagGaBm7FL9uQWg1bk52Eb3LTN4owkvxEKkirtyXNLQc9"; + let hash = DeploymentHash::new(deployment.clone()).unwrap(); + 
let db = Arc::new(sled::open(DB_NAME).unwrap()); + let store = Arc::new( + SledIndexerStore::new( + db, + deployment, + graph::indexer::store::StateSnapshotFrequency::Never, + ) + .unwrap(), + ); + let logger = graph::log::logger(true); + let metrics = Arc::new(MetricsRegistry::mock()); + + let mut stream = IndexerBlockStream::::new( + hash, + store, + None, + vec![12369730], + vec![12369750], + logger.clone(), + "handleBlock".into(), + metrics, + ); + + let earlier = Instant::now(); + let mut prev_ptr = 0; + loop { + let x = stream.next().await; + if x.is_none() { + break; + } + + let (ptr, data) = match x.unwrap().unwrap() { + BlockStreamEvent::ProcessWasmBlock(ptr, _, data, _, _) => (ptr, data), + _ => unreachable!(), + }; + // 12369739 + if ptr.number < 12369739 { + continue; + } + + if ptr.number > 12369739 { + return Ok(()); + } + if prev_ptr > ptr.number { + break; + } else { + prev_ptr = ptr.number; + } + let evts: Events = Message::decode(data.as_ref()).unwrap(); + let x = evts.events.first(); + if x.is_none() { + continue; + } + let x = x.unwrap(); + let pool_created: Vec<&Event> = evts + .events + .iter() + .filter(|e| e.r#type() == EventType::PoolCreated) + .collect(); + // info!(&logger, "====== {}:{} ======", x.address, x.block_number); + pool_created.iter().for_each(|e| { + info!(&logger, "poolCreated: owner:{} addr:{}", e.owner, e.address); + }); + } + let diff = Instant::now().duration_since(earlier).as_secs(); + println!("### Total streaming time: {}s", diff); + + Ok(()) +} diff --git a/node/src/main.rs b/node/src/main.rs index 8fbac6f3a84..e77c9077db5 100644 --- a/node/src/main.rs +++ b/node/src/main.rs @@ -49,6 +49,7 @@ use std::collections::BTreeMap; use std::collections::HashMap; use std::io::{BufRead, BufReader}; use std::path::Path; +use std::sync::Arc; use std::time::Duration; use tokio::sync::mpsc; @@ -886,16 +887,27 @@ fn ethereum_networks_as_chains( Arc::new(EthereumStreamBuilder {}), Arc::new(EthereumBlockRefetcher {}), 
Arc::new(adapter_selector), - runtime_adapter, + runtime_adapter.cheap_clone(), ENV_VARS.reorg_threshold, chain_config.polling_interval, is_ingestible, ); - (network_name.clone(), Arc::new(chain)) + (network_name.clone(), Arc::new(chain), runtime_adapter) }) .collect(); - for (network_name, chain) in chains.iter().cloned() { + for (network_name, chain, runtime_adapter) in chains.iter().cloned() { + let dataset_chain = graph_chain_dataset::Chain::new( + Some(runtime_adapter.eth_adapters.cheap_clone()), + runtime_adapter.call_cache.cheap_clone(), + logger_factory.clone(), + metrics_registry.cheap_clone(), + store.subgraph_store(), + store.block_store().chain_store(&network_name).unwrap(), + ); + + blockchain_map + .insert::(network_name.clone(), Arc::new(dataset_chain)); blockchain_map.insert::(network_name, chain) } @@ -919,5 +931,5 @@ fn ethereum_networks_as_chains( } } - HashMap::from_iter(chains) + HashMap::from_iter(chains.into_iter().map(|(a, b, _c)| (a, b))) } diff --git a/runtime/test/src/common.rs b/runtime/test/src/common.rs index 26ac308380f..75392de9df4 100644 --- a/runtime/test/src/common.rs +++ b/runtime/test/src/common.rs @@ -1,6 +1,7 @@ use ethabi::Contract; use graph::blockchain::BlockTime; use graph::components::store::DeploymentLocator; +use graph::components::store::SubgraphSegment; use graph::data::subgraph::*; use graph::data_source; use graph::env::EnvVars; @@ -120,6 +121,7 @@ pub fn mock_context( futures03::executor::block_on(store.writable( LOGGER.clone(), deployment.id, + SubgraphSegment::default(), Arc::new(Vec::new()), )) .unwrap(), diff --git a/runtime/test/src/test.rs b/runtime/test/src/test.rs index 5610e82a7d0..2ecd6b86e8a 100644 --- a/runtime/test/src/test.rs +++ b/runtime/test/src/test.rs @@ -1065,7 +1065,12 @@ async fn test_entity_store(api_version: Version) { // We need to empty the cache for the next test let writable = store - .writable(LOGGER.clone(), deployment.id, Arc::new(Vec::new())) + .writable( + LOGGER.clone(), + 
deployment.id, + SubgraphSegment::default(), + Arc::new(Vec::new()), + ) .await .unwrap(); let ctx = instance.store.data_mut(); diff --git a/server/index-node/src/resolver.rs b/server/index-node/src/resolver.rs index de0421e101e..70d4852d64f 100644 --- a/server/index-node/src/resolver.rs +++ b/server/index-node/src/resolver.rs @@ -607,6 +607,23 @@ impl IndexNodeResolver { ) .await? } + BlockchainKind::Dataset => { + let unvalidated_subgraph_manifest = + UnvalidatedSubgraphManifest::::resolve( + deployment_hash.clone(), + raw_yaml, + &self.link_resolver, + &self.logger, + max_spec_version, + ) + .await?; + + Self::validate_and_extract_features( + &self.store.subgraph_store(), + unvalidated_subgraph_manifest, + ) + .await? + } }; Ok(result) @@ -712,7 +729,9 @@ impl IndexNodeResolver { | BlockchainKind::Ethereum | BlockchainKind::Cosmos | BlockchainKind::Near - | BlockchainKind::Starknet => (), + | BlockchainKind::Starknet + // Dataset doesn't have blocks of its own so I don't think this will be needed. + | BlockchainKind::Dataset => (), } // The given network does not exist. 
diff --git a/store/postgres/migrations/2024-03-06-134207_subgraph-segments/down.sql b/store/postgres/migrations/2024-03-06-134207_subgraph-segments/down.sql new file mode 100644 index 00000000000..2e07b3f0ef0 --- /dev/null +++ b/store/postgres/migrations/2024-03-06-134207_subgraph-segments/down.sql @@ -0,0 +1,3 @@ +-- This file should undo anything in `up.sql` +drop table subgraphs.segments; + diff --git a/store/postgres/migrations/2024-03-06-134207_subgraph-segments/up.sql b/store/postgres/migrations/2024-03-06-134207_subgraph-segments/up.sql new file mode 100644 index 00000000000..be16c27d348 --- /dev/null +++ b/store/postgres/migrations/2024-03-06-134207_subgraph-segments/up.sql @@ -0,0 +1,8 @@ +-- Your SQL goes here +create table subgraphs.subgraph_segments( + id serial primary key, + deployment int4 references subgraphs.subgraph_deployment(id) on delete cascade, + start_block int4 not null, + stop_block int4 not null, + current_block int4 null +); diff --git a/store/postgres/src/deployment.rs b/store/postgres/src/deployment.rs index 7fa9534e69d..d7be75658c3 100644 --- a/store/postgres/src/deployment.rs +++ b/store/postgres/src/deployment.rs @@ -6,6 +6,7 @@ use diesel::{ connection::SimpleConnection, dsl::{count, delete, insert_into, select, sql, update}, sql_types::{Bool, Integer}, + Connection, NullableExpressionMethods, }; use diesel::{expression::SqlLiteral, pg::PgConnection, sql_types::Numeric}; use diesel::{ @@ -14,7 +15,9 @@ use diesel::{ sql_types::{Nullable, Text}, }; use graph::{ - blockchain::block_stream::FirehoseCursor, data::subgraph::schema::SubgraphError, + blockchain::block_stream::FirehoseCursor, + components::store::{SegmentDetails, SubgraphSegment}, + data::subgraph::schema::SubgraphError, schema::EntityType, }; use graph::{ @@ -411,13 +414,39 @@ pub fn set_manifest_raw_yaml( .map_err(|e| e.into()) } -pub fn transact_block( +pub fn mark_subgraph_segment_complete( conn: &mut PgConnection, - site: &Site, + segment: &SegmentDetails, +) -> 
Result<(), StoreError> { + use crate::diesel::BoolExpressionMethods; + use crate::primary::subgraph_segments as s; + + // Work around a Diesel issue with serializing BigDecimals to numeric + let number = format!("{}::numeric", segment.stop_block - 1); + + update( + s::table.filter(s::id.eq(segment.id.0)).filter( + // Asserts that the processing direction is forward. + s::current_block + .lt(sql(&number)) + .or(s::current_block.is_null()), + ), + ) + .set((s::current_block.eq(sql(&number)),)) + .returning(s::start_block) + .get_results::(conn) + .map_err(StoreError::from)?; + + Ok(()) +} + +fn update_subgraph_deployment( + conn: &mut PgConnection, + deployment_id: &DeploymentId, ptr: &BlockPtr, firehose_cursor: &FirehoseCursor, count: i32, -) -> Result { +) -> Result, StoreError> { use crate::diesel::BoolExpressionMethods; use subgraph_deployment as d; @@ -427,7 +456,7 @@ pub fn transact_block( let count_sql = entity_count_sql(count); let rows = update( - d::table.filter(d::id.eq(site.id)).filter( + d::table.filter(d::id.eq(deployment_id)).filter( // Asserts that the processing direction is forward. 
d::latest_ethereum_block_number .lt(sql(&number)) @@ -445,6 +474,58 @@ pub fn transact_block( .get_results::(conn) .map_err(StoreError::from)?; + Ok(rows) +} + +pub fn transact_block( + conn: &mut PgConnection, + site: &Site, + segment: &SubgraphSegment, + ptr: &BlockPtr, + firehose_cursor: &FirehoseCursor, + count: i32, +) -> Result { + fn update_segment( + conn: &mut PgConnection, + ptr: &BlockPtr, + segment: &SegmentDetails, + ) -> Result, StoreError> { + use crate::diesel::BoolExpressionMethods; + use crate::primary::subgraph_segments as s; + + assert!(ptr.number >= segment.start_block); + assert!(ptr.number <= segment.stop_block - 1); + + // Work around a Diesel issue with serializing BigDecimals to numeric + let number = format!("{}::numeric", ptr.number); + + let rows = update( + s::table.filter(s::id.eq(segment.id.0)).filter( + // Asserts that the processing direction is forward. + s::current_block + .lt(sql(&number)) + .or(s::current_block.is_null()), + ), + ) + .set((s::current_block.eq(sql(&number)),)) + .returning(s::start_block) + .get_results::(conn) + .map_err(StoreError::from)?; + + Ok(rows) + } + + let rows = match segment { + // When full acess is used, only the subgraph_deployment table is updated since + // that is what the rest of the systems looks for. + SubgraphSegment::AllBlocks => { + update_subgraph_deployment(conn, &site.id, ptr, firehose_cursor, count) + } + // When a segment is used then the progress is updated on the subgraph_segments table + // it is up to the caller to figure out when to switch from one mode to the other. + SubgraphSegment::Range(details) => update_segment(conn, ptr, &details), + }?; + match rows.len() { // Common case: A single row was updated. 
1 => Ok(rows[0]), @@ -1169,6 +1250,117 @@ fn entity_count_sql(count: i32) -> String { format!("entity_count + ({count})") } +pub fn create_subgraph_segments( + conn: &mut PgConnection, + deployment: graph::components::store::DeploymentId, + segments: Vec, +) -> Result, StoreError> { + use crate::primary::subgraph_segments::dsl as s; + use crate::primary::SegmentDetails as StoreSegmentDetails; + + let count: i64 = s::subgraph_segments + .filter(s::deployment.eq(&deployment.0)) + .count() + .get_result(conn)?; + + if count == 0 { + conn.transaction(|conn| { + segments + .into_iter() + .map(|details| { + let SegmentDetails { + id: _, + deployment, + start_block, + stop_block: end_block, + current_block: _, + } = details; + + diesel::insert_into(s::subgraph_segments) + .values(( + s::deployment.eq(&deployment.0), + s::start_block.eq(&start_block), + s::stop_block.eq(&end_block), + )) + .on_conflict_do_nothing() + .execute(conn) + }) + .collect::, diesel::result::Error>>() + })?; + } + + s::subgraph_segments + .select(( + s::id, + s::deployment, + s::start_block, + s::stop_block, + s::current_block.nullable(), + )) + .filter(s::deployment.eq(&deployment.0)) + .get_results::(conn) + .map_err(StoreError::from) + .map(|ds| { + ds.into_iter() + .map(SegmentDetails::from) + .map(SubgraphSegment::Range) + .collect() + }) +} + +pub fn subgraph_segment_for_block_number( + conn: &mut PgConnection, + deployment: DeploymentId, + bn: Option, +) -> Result { + use crate::primary::subgraph_segments::dsl as s; + use crate::primary::SegmentDetails as StoreSegmentDetails; + + s::subgraph_segments + .select(( + s::id, + s::deployment, + s::start_block, + s::stop_block, + s::current_block.nullable(), + )) + .filter(s::deployment.eq(&deployment)) + .filter(s::start_block.ge(&bn.unwrap_or_default())) + .order(s::start_block.desc()) + .limit(1) + .get_result::(conn) + .map_err(StoreError::from) + .map_err(Into::into) + .map(SegmentDetails::from) +} + +pub fn subgraph_segments( + conn: &mut 
PgConnection, + deployment: DeploymentId, +) -> Result, StoreError> { + use crate::primary::subgraph_segments::dsl as s; + use crate::primary::SegmentDetails as StoreSegmentDetails; + + s::subgraph_segments + .select(( + s::id, + s::deployment, + s::start_block, + s::stop_block, + s::current_block.nullable(), + )) + .filter(s::deployment.eq(&deployment)) + .get_results::(conn) + .map_err(StoreError::from) + .map_err(Into::into) + .map(|ds| { + ds.into_iter() + .map(SegmentDetails::from) + .map(SubgraphSegment::Range) + .collect() + }) +} + pub fn update_entity_count( conn: &mut PgConnection, site: &Site, diff --git a/store/postgres/src/deployment_store.rs b/store/postgres/src/deployment_store.rs index 8bddc48b8fb..603d46aa03d 100644 --- a/store/postgres/src/deployment_store.rs +++ b/store/postgres/src/deployment_store.rs @@ -9,7 +9,7 @@ use graph::blockchain::BlockTime; use graph::components::store::write::RowGroup; use graph::components::store::{ Batch, DerivedEntityQuery, PrunePhase, PruneReporter, PruneRequest, PruningStrategy, - QueryPermit, StoredDynamicDataSource, VersionStats, + QueryPermit, StoredDynamicDataSource, SubgraphSegment, VersionStats, }; use graph::components::versions::VERSIONS; use graph::data::query::Trace; @@ -19,7 +19,7 @@ use graph::data_source::CausalityRegion; use graph::prelude::futures03::FutureExt; use graph::prelude::{ ApiVersion, CancelHandle, CancelToken, CancelableError, EntityOperation, PoolWaitStats, - SubgraphDeploymentEntity, + SubgraphDeploymentEntity, TryFutureExt, }; use graph::semver::Version; use graph::tokio::task::JoinHandle; @@ -50,7 +50,7 @@ use crate::block_range::{BLOCK_COLUMN, BLOCK_RANGE_COLUMN}; use crate::deployment::{self, OnSync}; use crate::detail::ErrorDetail; use crate::dynds::DataSourcesTable; -use crate::primary::DeploymentId; +use crate::primary::{DeploymentId, SegmentDetails}; use crate::relational::index::{CreateIndex, Method}; use crate::relational::{Layout, LayoutCache, SqlName, Table}; use 
crate::relational_queries::FromEntityData; @@ -1106,10 +1106,75 @@ impl DeploymentStore { .map(|(entities, _)| entities) } + pub(crate) async fn create_subgraph_segments( + &self, + + deployment: graph::components::store::DeploymentId, + segments: Vec, + ) -> Result, StoreError> { + self.with_conn(move |conn, _| { + deployment::create_subgraph_segments( + conn, + deployment, + segments.into_iter().map(Into::into).collect(), + ) + .map_err(CancelableError::from) + }) + .await + } + + /// Gets the next range of blocks that is ready to stream + /// Returns the range that includes the start block if one is provided. + /// If start block is provided it will return the range [start_block,`current_block`[ + /// wthin the relevant segment. + /// If start block is None, the same range of the lowest segment is returned. + /// When the `current_block` of the segment is lower or eql the provided start_block then (start_block, start_block) + /// is returned indicating there are no new blocks for processing. 
+ pub(crate) async fn next_segment_block_range( + &self, + deployment: graph::components::store::DeploymentId, + start_block: Option, + ) -> Result<(BlockNumber, BlockNumber), StoreError> { + let details = self + .with_conn(move |conn, _| { + deployment::subgraph_segment_for_block_number(conn, deployment.into(), start_block) + .map_err(CancelableError::from) + }) + .await?; + let start_block = start_block.map(|s| s + 1).unwrap_or(details.start_block); + let current_block = details.current_block.unwrap_or(start_block); + + let res = (start_block, current_block.min(start_block)); + + Ok(res) + } + + pub(crate) async fn subgraph_segments( + &self, + deployment: graph::components::store::DeploymentId, + ) -> Result, StoreError> { + self.with_conn(move |conn, _| { + deployment::subgraph_segments(conn, deployment.into()).map_err(CancelableError::from) + }) + .await + } + + pub(crate) async fn mark_subgraph_segment_complete( + &self, + segment: SegmentDetails, + ) -> Result<(), StoreError> { + self.with_conn(move |conn, _| { + deployment::mark_subgraph_segment_complete(conn, &segment.into()) + .map_err(CancelableError::from) + }) + .await + } + pub(crate) fn transact_block_operations( self: &Arc, logger: &Logger, site: Arc, + segment: &SubgraphSegment, batch: &Batch, last_rollup: Option, stopwatch: &StopwatchMetrics, @@ -1167,6 +1232,7 @@ impl DeploymentStore { let earliest_block = deployment::transact_block( conn, &site, + segment, &batch.block_ptr, &batch.firehose_cursor, count, diff --git a/store/postgres/src/primary.rs b/store/postgres/src/primary.rs index 48628c28a6f..a755d4e255f 100644 --- a/store/postgres/src/primary.rs +++ b/store/postgres/src/primary.rs @@ -9,6 +9,7 @@ use diesel::{ pg::Pg, serialize::{Output, ToSql}, sql_types::{Array, BigInt, Bool, Integer, Text}, + Queryable, }; use diesel::{ dsl::{delete, insert_into, sql, update}, @@ -23,18 +24,21 @@ use diesel::{ Connection as _, }; use graph::{ - components::store::DeploymentLocator, + components::store::{ 
+ DeploymentId as GraphDeploymentId, DeploymentSchemaVersion, SubgraphSegment, + SubgraphSegmentId, + }, + prelude::{chrono, CancelHandle, CancelToken}, +}; +use graph::{ + components::store::{DeploymentLocator, SegmentDetails as GraphSegmentDetails}, constraint_violation, data::subgraph::{status, DeploymentFeatures}, prelude::{ - anyhow, bigdecimal::ToPrimitive, serde_json, DeploymentHash, EntityChange, + anyhow, bigdecimal::ToPrimitive, serde_json, BlockNumber, DeploymentHash, EntityChange, EntityChangeOperation, NodeId, StoreError, SubgraphName, SubgraphVersionSwitchingMode, }, }; -use graph::{ - components::store::{DeploymentId as GraphDeploymentId, DeploymentSchemaVersion}, - prelude::{chrono, CancelHandle, CancelToken}, -}; use graph::{data::subgraph::schema::generate_entity_id, prelude::StoreEvent}; use itertools::Itertools; use maybe_owned::MaybeOwnedMut; @@ -89,6 +93,16 @@ table! { } } +table! { + subgraphs.subgraph_segments (id) { + id -> Integer, + deployment -> Integer, + start_block -> Integer, + stop_block -> Integer, + current_block -> Nullable, + } +} + table! 
{ subgraphs.subgraph_version (vid) { vid -> BigInt, @@ -286,6 +300,55 @@ impl Borrow for Namespace { } } +#[derive(Queryable)] +pub struct SegmentDetails { + pub id: SegmentId, + pub deployment: DeploymentId, + pub start_block: BlockNumber, + pub end_block: BlockNumber, + pub current_block: Option, +} + +impl From for GraphSegmentDetails { + fn from(value: SegmentDetails) -> GraphSegmentDetails { + let SegmentDetails { + id, + deployment, + start_block, + end_block, + current_block, + } = value; + + GraphSegmentDetails { + id: id.into(), + deployment: deployment.into(), + start_block, + stop_block: end_block, + current_block, + } + } +} + +impl From for SegmentDetails { + fn from(value: GraphSegmentDetails) -> Self { + let GraphSegmentDetails { + id, + deployment, + start_block, + stop_block: end_block, + current_block, + } = value; + + SegmentDetails { + id: id.into(), + deployment: deployment.into(), + start_block, + end_block, + current_block, + } + } +} + /// A marker that an `i32` references a deployment. Values of this type hold /// the primary key from the `deployment_schemas` table #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, AsExpression, FromSqlRow)] @@ -329,6 +392,30 @@ impl ToSql for DeploymentId { } } +/// A marker that an `i32` references a a segment. Values of this type hold +/// the primary key from the `subgraphs.segments` table; +#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, AsExpression, FromSqlRow)] +#[sql_type = "diesel::sql_types::Integer"] +pub struct SegmentId(i32); + +impl FromSql for SegmentId { + fn from_sql(bytes: diesel::pg::PgValue) -> diesel::deserialize::Result { + let id = >::from_sql(bytes)?; + Ok(SegmentId(id)) + } +} + +impl From for SubgraphSegmentId { + fn from(value: SegmentId) -> Self { + Self(value.0) + } +} +impl From for SegmentId { + fn from(value: SubgraphSegmentId) -> Self { + Self(value.0) + } +} + #[derive(Debug, PartialEq)] /// Details about a deployment and the shard in which it is stored. 
We need /// the database namespace for the deployment as that information is only @@ -1154,7 +1241,7 @@ impl<'a> Connection<'a> { features, data_source_kinds: data_sources, handler_kinds: handlers, - network: network, + network, } }, ); diff --git a/store/postgres/src/subgraph_store.rs b/store/postgres/src/subgraph_store.rs index b5cbd528824..79657d615f1 100644 --- a/store/postgres/src/subgraph_store.rs +++ b/store/postgres/src/subgraph_store.rs @@ -17,7 +17,7 @@ use graph::{ server::index_node::VersionInfo, store::{ self, BlockPtrForNumber, BlockStore, DeploymentLocator, EnsLookup as EnsLookupTrait, - PruneReporter, PruneRequest, SubgraphFork, + PruneReporter, PruneRequest, SubgraphFork, SubgraphSegment, SubgraphSegmentId, }, }, constraint_violation, @@ -38,7 +38,7 @@ use graph::{ use crate::{ connection_pool::ConnectionPool, deployment::{OnSync, SubgraphHealth}, - primary, + primary::{self}, primary::{DeploymentId, Mirror as PrimaryMirror, Site}, relational::{index::Method, Layout}, writable::WritableStore, @@ -283,7 +283,7 @@ pub struct SubgraphStoreInner { sites: TimedCache, placer: Arc, sender: Arc, - writables: Mutex>>, + writables: Mutex>>, registry: Arc, } @@ -1454,13 +1454,19 @@ impl SubgraphStoreTrait for SubgraphStore { self: Arc, logger: Logger, deployment: graph::components::store::DeploymentId, + segment: SubgraphSegment, manifest_idx_and_name: Arc>, ) -> Result, StoreError> { let deployment = deployment.into(); // We cache writables to make sure calls to this method are // idempotent and there is ever only one `WritableStore` for any // deployment - if let Some(writable) = self.writables.lock().unwrap().get(&deployment) { + if let Some(writable) = self + .writables + .lock() + .unwrap() + .get(&(deployment, segment.id().unwrap_or_default())) + { // A poisoned writable will not write anything anymore; we // discard it and create a new one that is properly initialized // according to the state in the database. 
@@ -1482,15 +1488,16 @@ impl SubgraphStoreTrait for SubgraphStore { self.as_ref().clone(), logger, site, + segment.clone(), manifest_idx_and_name, self.registry.clone(), ) .await?, ); - self.writables - .lock() - .unwrap() - .insert(deployment, writable.cheap_clone()); + self.writables.lock().unwrap().insert( + (deployment, segment.id().unwrap_or_default()), + writable.cheap_clone(), + ); Ok(writable) } @@ -1499,7 +1506,11 @@ impl SubgraphStoreTrait for SubgraphStore { // Remove the writable from the cache and stop it let deployment = loc.id.into(); - let writable = self.writables.lock().unwrap().remove(&deployment); + let writable = self + .writables + .lock() + .unwrap() + .remove(&(deployment, SubgraphSegmentId::default())); match writable { Some(writable) => writable.stop().await, None => Ok(()), diff --git a/store/postgres/src/writable.rs b/store/postgres/src/writable.rs index cd03cca9153..40339579678 100644 --- a/store/postgres/src/writable.rs +++ b/store/postgres/src/writable.rs @@ -1,13 +1,16 @@ use std::collections::BTreeSet; -use std::ops::Deref; +use std::ops::{Deref, Range}; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::{Mutex, RwLock, TryLockError as RwLockError}; use std::time::Instant; use std::{collections::BTreeMap, sync::Arc}; +use diesel::RunQueryDsl; use graph::blockchain::block_stream::FirehoseCursor; use graph::blockchain::BlockTime; -use graph::components::store::{Batch, DeploymentCursorTracker, DerivedEntityQuery, ReadStore}; +use graph::components::store::{ + Batch, DeploymentCursorTracker, DerivedEntityQuery, ReadStore, SegmentDetails, SubgraphSegment, +}; use graph::constraint_violation; use graph::data::store::IdList; use graph::data::subgraph::schema; @@ -36,9 +39,11 @@ use store::StoredDynamicDataSource; use crate::deployment_store::DeploymentStore; use crate::primary::DeploymentId; -use crate::retry; +use crate::{deployment, retry}; use crate::{primary, primary::Site, relational::Layout, SubgraphStore}; +const 
DATASET_BLOCKRANGE_LIMIT: usize = 500; + /// A wrapper around `SubgraphStore` that only exposes functions that are /// safe to call from `WritableStore`, i.e., functions that either do not /// deal with anything that depends on a specific deployment @@ -151,6 +156,7 @@ struct SyncStore { store: WritableSubgraphStore, writable: Arc, site: Arc, + segment: SubgraphSegment, input_schema: InputSchema, manifest_idx_and_name: Arc>, last_rollup: LastRollupTracker, @@ -161,6 +167,7 @@ impl SyncStore { subgraph_store: SubgraphStore, logger: Logger, site: Arc, + segment: SubgraphSegment, manifest_idx_and_name: Arc>, block: Option, ) -> Result { @@ -182,6 +189,7 @@ impl SyncStore { input_schema, manifest_idx_and_name, last_rollup, + segment, }) } @@ -311,6 +319,7 @@ impl SyncStore { let event = self.writable.transact_block_operations( &self.logger, self.site.clone(), + &self.segment, batch, self.last_rollup.get(), stopwatch, @@ -1493,6 +1502,7 @@ impl WritableStore { subgraph_store: SubgraphStore, logger: Logger, site: Arc, + segment: SubgraphSegment, manifest_idx_and_name: Arc>, registry: Arc, ) -> Result { @@ -1505,6 +1515,7 @@ impl WritableStore { subgraph_store, logger.clone(), site, + segment, manifest_idx_and_name, block_ptr.as_ref().map(|ptr| ptr.number), ) @@ -1579,6 +1590,40 @@ impl DeploymentCursorTracker for WritableStore { #[async_trait::async_trait] impl WritableStoreTrait for WritableStore { + async fn mark_subgraph_segment_complete( + &self, + segment: SegmentDetails, + ) -> Result<(), StoreError> { + self.store + .writable + .mark_subgraph_segment_complete(segment.into()) + .await + } + + async fn create_segments( + &self, + deployment: graph::components::store::DeploymentId, + segments: Vec, + ) -> Result, StoreError> { + self.store + .writable + .create_subgraph_segments( + deployment.into(), + segments.into_iter().map(Into::into).collect(), + ) + .await + } + + async fn get_segments( + &self, + deployment: graph::components::store::DeploymentId, + ) -> 
Result, StoreError> { + self.store + .writable + .subgraph_segments(deployment.into()) + .await + } + async fn start_subgraph_deployment(&self, logger: &Logger) -> Result<(), StoreError> { let store = self.store.cheap_clone(); let logger = logger.cheap_clone(); @@ -1746,7 +1791,12 @@ impl WritableStoreTrait for WritableStore { let store = Arc::new(self.store.store.0.clone()); let manifest_idx_and_name = self.store.manifest_idx_and_name.cheap_clone(); store - .writable(logger, self.store.site.id.into(), manifest_idx_and_name) + .writable( + logger, + self.store.site.id.into(), + self.store.segment.clone(), + manifest_idx_and_name, + ) .await .map(|store| Some(store)) } else { diff --git a/store/test-store/src/store.rs b/store/test-store/src/store.rs index f13b9edbc47..e6f462e6883 100644 --- a/store/test-store/src/store.rs +++ b/store/test-store/src/store.rs @@ -1,6 +1,7 @@ use diesel::{self, PgConnection}; use graph::blockchain::mock::MockDataSource; use graph::blockchain::BlockTime; +use graph::components::store::SubgraphSegment; use graph::data::graphql::load_manager::LoadManager; use graph::data::query::QueryResults; use graph::data::query::QueryTarget; @@ -198,7 +199,12 @@ pub async fn create_subgraph_with_manifest( SUBGRAPH_STORE .cheap_clone() - .writable(LOGGER.clone(), deployment.id, Arc::new(Vec::new())) + .writable( + LOGGER.clone(), + deployment.id, + SubgraphSegment::default(), + Arc::new(Vec::new()), + ) .await? .start_subgraph_deployment(&LOGGER) .await?; @@ -287,7 +293,12 @@ pub async fn transact_errors( let block_time = BlockTime::for_test(&block_ptr_to); store .subgraph_store() - .writable(LOGGER.clone(), deployment.id, Arc::new(Vec::new())) + .writable( + LOGGER.clone(), + deployment.id, + SubgraphSegment::default(), + Arc::new(Vec::new()), + ) .await? 
.transact_block_operations( block_ptr_to, @@ -353,6 +364,7 @@ pub async fn transact_entities_and_dynamic_data_sources( let store = futures03::executor::block_on(store.cheap_clone().writable( LOGGER.clone(), deployment.id, + SubgraphSegment::default(), Arc::new(manifest_idx_and_name), ))?; @@ -391,7 +403,12 @@ pub async fn transact_entities_and_dynamic_data_sources( pub async fn revert_block(store: &Arc, deployment: &DeploymentLocator, ptr: &BlockPtr) { store .subgraph_store() - .writable(LOGGER.clone(), deployment.id, Arc::new(Vec::new())) + .writable( + LOGGER.clone(), + deployment.id, + SubgraphSegment::default(), + Arc::new(Vec::new()), + ) .await .expect("can get writable") .revert_block_operations(ptr.clone(), FirehoseCursor::None) @@ -442,7 +459,12 @@ pub async fn insert_entities( pub async fn flush(deployment: &DeploymentLocator) -> Result<(), StoreError> { let writable = SUBGRAPH_STORE .cheap_clone() - .writable(LOGGER.clone(), deployment.id, Arc::new(Vec::new())) + .writable( + LOGGER.clone(), + deployment.id, + SubgraphSegment::default(), + Arc::new(Vec::new()), + ) .await .expect("we can get a writable"); writable.flush().await diff --git a/store/test-store/tests/graph/entity_cache.rs b/store/test-store/tests/graph/entity_cache.rs index d7ebb30785c..52b083135ec 100644 --- a/store/test-store/tests/graph/entity_cache.rs +++ b/store/test-store/tests/graph/entity_cache.rs @@ -2,7 +2,7 @@ use graph::blockchain::block_stream::FirehoseCursor; use graph::blockchain::BlockTime; use graph::components::store::{ DeploymentCursorTracker, DerivedEntityQuery, GetScope, LoadRelatedRequest, ReadStore, - StoredDynamicDataSource, WritableStore, + SegmentDetails, StoredDynamicDataSource, SubgraphSegment, WritableStore, }; use graph::data::store::Id; use graph::data::subgraph::schema::{DeploymentCreate, SubgraphError, SubgraphHealth}; @@ -93,6 +93,30 @@ impl DeploymentCursorTracker for MockStore { #[async_trait] impl WritableStore for MockStore { + async fn 
mark_subgraph_segment_complete( + &self, + _segment: SegmentDetails, + ) -> Result<(), StoreError> { + unimplemented!() + } + + /// Persists a set of segments according to the provided details. If called more than once + /// this should be a NOOP. + async fn create_segments( + &self, + _deployment: DeploymentId, + _segments: Vec, + ) -> Result, StoreError> { + unimplemented!() + } + + async fn get_segments( + &self, + _deployment: DeploymentId, + ) -> Result, StoreError> { + unimplemented!() + } + async fn start_subgraph_deployment(&self, _: &Logger) -> Result<(), StoreError> { unimplemented!() } @@ -380,7 +404,12 @@ where let deployment = insert_test_data(subgraph_store.clone()).await; let writable = store .subgraph_store() - .writable(LOGGER.clone(), deployment.id, Arc::new(Vec::new())) + .writable( + LOGGER.clone(), + deployment.id, + SubgraphSegment::default(), + Arc::new(Vec::new()), + ) .await .expect("we can get a writable store"); diff --git a/store/test-store/tests/postgres/aggregation.rs b/store/test-store/tests/postgres/aggregation.rs index a5dce4ee20b..8846ebfb57b 100644 --- a/store/test-store/tests/postgres/aggregation.rs +++ b/store/test-store/tests/postgres/aggregation.rs @@ -1,6 +1,7 @@ use std::fmt::Write; use std::{future::Future, sync::Arc}; +use graph::components::store::SubgraphSegment; use graph::{ blockchain::{block_stream::FirehoseCursor, BlockPtr, BlockTime}, components::{ @@ -237,7 +238,12 @@ where let loc = create_test_subgraph(&hash, SCHEMA).await; let writable = store .subgraph_store() - .writable(LOGGER.clone(), loc.id, Arc::new(Vec::new())) + .writable( + LOGGER.clone(), + loc.id, + SubgraphSegment::default(), + Arc::new(Vec::new()), + ) .await .expect("we can get a writable store"); insert_test_data(writable.clone(), loc.clone()).await; diff --git a/store/test-store/tests/postgres/graft.rs b/store/test-store/tests/postgres/graft.rs index 25b6d16d267..8a233e5ff14 100644 --- a/store/test-store/tests/postgres/graft.rs +++ 
b/store/test-store/tests/postgres/graft.rs @@ -7,7 +7,7 @@ use test_store::*; use graph::components::store::{ DeploymentLocator, EntityOrder, EntityQuery, PruneReporter, PruneRequest, PruningStrategy, - VersionStats, + SubgraphSegment, VersionStats, }; use graph::data::store::{scalar, Id}; use graph::data::subgraph::schema::*; @@ -121,7 +121,12 @@ where store .cheap_clone() - .writable(LOGGER.clone(), deployment.id, Arc::new(Vec::new())) + .writable( + LOGGER.clone(), + deployment.id, + SubgraphSegment::default(), + Arc::new(Vec::new()), + ) .await .unwrap() .flush() @@ -331,7 +336,12 @@ async fn check_graft( .unwrap(); let writable = store - .writable(LOGGER.clone(), deployment.id, Arc::new(Vec::new())) + .writable( + LOGGER.clone(), + deployment.id, + SubgraphSegment::default(), + Arc::new(Vec::new()), + ) .await?; writable .revert_block_operations(BLOCKS[1].clone(), FirehoseCursor::None) @@ -445,7 +455,12 @@ fn copy() { store .cheap_clone() - .writable(LOGGER.clone(), deployment.id, Arc::new(Vec::new())) + .writable( + LOGGER.clone(), + deployment.id, + SubgraphSegment::default(), + Arc::new(Vec::new()), + ) .await? 
.start_subgraph_deployment(&LOGGER) .await?; @@ -476,7 +491,12 @@ fn on_sync() { let writable = store .cheap_clone() - .writable(LOGGER.clone(), dst.id, Arc::new(Vec::new())) + .writable( + LOGGER.clone(), + dst.id, + SubgraphSegment::default(), + Arc::new(Vec::new()), + ) .await?; writable.start_subgraph_deployment(&LOGGER).await?; @@ -525,7 +545,12 @@ fn on_sync() { let writable = store .cheap_clone() - .writable(LOGGER.clone(), dst.id, Arc::new(Vec::new())) + .writable( + LOGGER.clone(), + dst.id, + SubgraphSegment::default(), + Arc::new(Vec::new()), + ) .await?; // Perform the copy diff --git a/store/test-store/tests/postgres/store.rs b/store/test-store/tests/postgres/store.rs index bef9dda68c0..2ec8232f820 100644 --- a/store/test-store/tests/postgres/store.rs +++ b/store/test-store/tests/postgres/store.rs @@ -12,7 +12,7 @@ use std::{collections::HashSet, sync::Mutex}; use std::{marker::PhantomData, str::FromStr}; use test_store::*; -use graph::components::store::{DeploymentLocator, ReadStore, WritableStore}; +use graph::components::store::{DeploymentLocator, ReadStore, SubgraphSegment, WritableStore}; use graph::data::subgraph::*; use graph::{ blockchain::DataSource, @@ -144,7 +144,12 @@ where let deployment = insert_test_data(subgraph_store.clone()).await; let writable = store .subgraph_store() - .writable(LOGGER.clone(), deployment.id, Arc::new(Vec::new())) + .writable( + LOGGER.clone(), + deployment.id, + SubgraphSegment::default(), + Arc::new(Vec::new()), + ) .await .expect("we can get a writable store"); diff --git a/store/test-store/tests/postgres/subgraph.rs b/store/test-store/tests/postgres/subgraph.rs index 3080c57fde2..68ad36605f5 100644 --- a/store/test-store/tests/postgres/subgraph.rs +++ b/store/test-store/tests/postgres/subgraph.rs @@ -1,7 +1,7 @@ use graph::{ components::{ server::index_node::VersionInfo, - store::{DeploymentId, DeploymentLocator, StatusStore}, + store::{DeploymentId, DeploymentLocator, StatusStore, SubgraphSegment}, }, 
data::query::QueryTarget, data::subgraph::{schema::SubgraphHealth, SubgraphFeature}, @@ -63,7 +63,12 @@ fn get_subgraph_features(id: String) -> Option { async fn latest_block(store: &Store, deployment_id: DeploymentId) -> BlockPtr { store .subgraph_store() - .writable(LOGGER.clone(), deployment_id, Arc::new(Vec::new())) + .writable( + LOGGER.clone(), + deployment_id, + SubgraphSegment::default(), + Arc::new(Vec::new()), + ) .await .expect("can get writable") .block_ptr() @@ -188,6 +193,7 @@ fn create_subgraph() { futures03::executor::block_on(store.cheap_clone().writable( LOGGER.clone(), deployment.id, + SubgraphSegment::default(), Arc::new(Vec::new()), )) .expect("can get writable") @@ -429,7 +435,12 @@ fn status() { store .subgraph_store() - .writable(LOGGER.clone(), deployment.id, Arc::new(Vec::new())) + .writable( + LOGGER.clone(), + deployment.id, + SubgraphSegment::default(), + Arc::new(Vec::new()), + ) .await .expect("can get writable") .fail_subgraph(error) @@ -690,7 +701,12 @@ fn fatal_vs_non_fatal() { store .subgraph_store() - .writable(LOGGER.clone(), deployment.id, Arc::new(Vec::new())) + .writable( + LOGGER.clone(), + deployment.id, + SubgraphSegment::default(), + Arc::new(Vec::new()), + ) .await .expect("can get writable") .fail_subgraph(error()) @@ -784,7 +800,12 @@ fn fail_unfail_deterministic_error() { let writable = store .subgraph_store() - .writable(LOGGER.clone(), deployment.id, Arc::new(Vec::new())) + .writable( + LOGGER.clone(), + deployment.id, + SubgraphSegment::default(), + Arc::new(Vec::new()), + ) .await .expect("can get writable"); @@ -876,7 +897,12 @@ fn fail_unfail_deterministic_error_noop() { let writable = store .subgraph_store() - .writable(LOGGER.clone(), deployment.id, Arc::new(Vec::new())) + .writable( + LOGGER.clone(), + deployment.id, + SubgraphSegment::default(), + Arc::new(Vec::new()), + ) .await .expect("can get writable"); @@ -1003,7 +1029,12 @@ fn fail_unfail_non_deterministic_error() { let writable = store 
.subgraph_store() - .writable(LOGGER.clone(), deployment.id, Arc::new(Vec::new())) + .writable( + LOGGER.clone(), + deployment.id, + SubgraphSegment::default(), + Arc::new(Vec::new()), + ) .await .expect("can get writable"); @@ -1103,7 +1134,12 @@ fn fail_unfail_non_deterministic_error_noop() { let writable = store .subgraph_store() - .writable(LOGGER.clone(), deployment.id, Arc::new(Vec::new())) + .writable( + LOGGER.clone(), + deployment.id, + SubgraphSegment::default(), + Arc::new(Vec::new()), + ) .await .expect("can get writable"); diff --git a/store/test-store/tests/postgres/writable.rs b/store/test-store/tests/postgres/writable.rs index 4a986e6f3fa..6f4aa5045a7 100644 --- a/store/test-store/tests/postgres/writable.rs +++ b/store/test-store/tests/postgres/writable.rs @@ -8,7 +8,9 @@ use std::collections::BTreeSet; use std::marker::PhantomData; use test_store::*; -use graph::components::store::{DeploymentLocator, DerivedEntityQuery, WritableStore}; +use graph::components::store::{ + DeploymentLocator, DerivedEntityQuery, SubgraphSegment, WritableStore, +}; use graph::data::subgraph::*; use graph::semver::Version; use graph::{entity, prelude::*}; @@ -92,7 +94,12 @@ where let deployment = insert_test_data(subgraph_store.clone()).await; let writable = store .subgraph_store() - .writable(LOGGER.clone(), deployment.id, Arc::new(Vec::new())) + .writable( + LOGGER.clone(), + deployment.id, + SubgraphSegment::default(), + Arc::new(Vec::new()), + ) .await .expect("we can get a writable store"); diff --git a/tests/src/fixture/mod.rs b/tests/src/fixture/mod.rs index 6dbdaf97688..50018dd7fda 100644 --- a/tests/src/fixture/mod.rs +++ b/tests/src/fixture/mod.rs @@ -20,7 +20,7 @@ use graph::blockchain::{ use graph::cheap_clone::CheapClone; use graph::components::link_resolver::{ArweaveClient, ArweaveResolver, FileSizeLimit}; use graph::components::metrics::MetricsRegistry; -use graph::components::store::{BlockStore, DeploymentLocator}; +use 
graph::components::store::{BlockStore, DeploymentLocator, SubgraphSegment}; use graph::components::subgraph::Settings; use graph::data::graphql::load_manager::LoadManager; use graph::data::query::{Query, QueryTarget}; @@ -211,6 +211,7 @@ impl TestContext { raw, Some(stop_block.block_number()), tp, + None, ) .await .unwrap() @@ -234,6 +235,7 @@ impl TestContext { raw, Some(stop_block.block_number()), tp, + None, ) .await .unwrap() @@ -607,7 +609,12 @@ pub async fn wait_for_sync( async fn flush(logger: &Logger, store: &Arc, deployment: &DeploymentLocator) { store .clone() - .writable(logger.clone(), deployment.id, Arc::new(vec![])) + .writable( + logger.clone(), + deployment.id, + SubgraphSegment::default(), + Arc::new(vec![]), + ) .await .unwrap() .flush() diff --git a/tests/tests/runner_tests.rs b/tests/tests/runner_tests.rs index b0a23fb88ff..165e6a50221 100644 --- a/tests/tests/runner_tests.rs +++ b/tests/tests/runner_tests.rs @@ -8,6 +8,7 @@ use std::time::Duration; use assert_json_diff::assert_json_eq; use graph::blockchain::block_stream::BlockWithTriggers; use graph::blockchain::{Block, BlockPtr, Blockchain}; +use graph::components::store::SubgraphSegment; use graph::data::store::scalar::Bytes; use graph::data::subgraph::schema::{SubgraphError, SubgraphHealth}; use graph::data::value::Word; @@ -686,7 +687,12 @@ async fn file_data_sources() { let store = ctx.store.cheap_clone(); let writable = store - .writable(ctx.logger.clone(), ctx.deployment.id, Arc::new(Vec::new())) + .writable( + ctx.logger.clone(), + ctx.deployment.id, + SubgraphSegment::default(), + Arc::new(Vec::new()), + ) .await .unwrap(); let datasources = writable.load_dynamic_data_sources(vec![]).await.unwrap(); @@ -708,7 +714,12 @@ async fn file_data_sources() { let writable = ctx .store .clone() - .writable(ctx.logger.clone(), ctx.deployment.id, Arc::new(Vec::new())) + .writable( + ctx.logger.clone(), + ctx.deployment.id, + SubgraphSegment::default(), + Arc::new(Vec::new()), + ) .await 
.unwrap(); let data_sources = writable.load_dynamic_data_sources(vec![]).await.unwrap(); @@ -725,7 +736,12 @@ async fn file_data_sources() { let writable = ctx .store .clone() - .writable(ctx.logger.clone(), ctx.deployment.id, Arc::new(Vec::new())) + .writable( + ctx.logger.clone(), + ctx.deployment.id, + SubgraphSegment::default(), + Arc::new(Vec::new()), + ) .await .unwrap(); let data_sources = writable.load_dynamic_data_sources(vec![]).await.unwrap(); @@ -1186,7 +1202,12 @@ async fn arweave_file_data_sources() { let store = ctx.store.cheap_clone(); let writable = store - .writable(ctx.logger.clone(), ctx.deployment.id, Arc::new(Vec::new())) + .writable( + ctx.logger.clone(), + ctx.deployment.id, + SubgraphSegment::default(), + Arc::new(Vec::new()), + ) .await .unwrap(); let datasources = writable.load_dynamic_data_sources(vec![]).await.unwrap(); diff --git a/transforms/uniswap/Cargo.toml b/transforms/uniswap/Cargo.toml new file mode 100644 index 00000000000..3e50e4a5866 --- /dev/null +++ b/transforms/uniswap/Cargo.toml @@ -0,0 +1,27 @@ +[package] +name = "uniswap" +version = "0.1.0" +edition = "2021" + +[dependencies] +ethabi = "17.0" +prost = "0.11" +prost-types = "0.11" +hex = "0.4" +num-bigint = "0.4" +num-traits = "0.2" +phf = { version = "0.11.1", features = ["macros"] } +substreams = "0.5" +substreams-ethereum = "0.9" +substreams-entity-change = "1.3" +tiny-keccak = "2.0" + +graph = { path = "../../graph" } +borsh = { version = "1.3.1", features = ["derive"] } + +[build-dependencies] +prost-build = "0.11" +anyhow = "1" +substreams-ethereum = "0.9" +tonic-build = { workspace = true } + diff --git a/transforms/uniswap/abis/ERC20.json b/transforms/uniswap/abis/ERC20.json new file mode 100644 index 00000000000..405d6b36486 --- /dev/null +++ b/transforms/uniswap/abis/ERC20.json @@ -0,0 +1,222 @@ +[ + { + "constant": true, + "inputs": [], + "name": "name", + "outputs": [ + { + "name": "", + "type": "string" + } + ], + "payable": false, + "stateMutability": 
"view", + "type": "function" + }, + { + "constant": false, + "inputs": [ + { + "name": "_spender", + "type": "address" + }, + { + "name": "_value", + "type": "uint256" + } + ], + "name": "approve", + "outputs": [ + { + "name": "", + "type": "bool" + } + ], + "payable": false, + "stateMutability": "nonpayable", + "type": "function" + }, + { + "constant": true, + "inputs": [], + "name": "totalSupply", + "outputs": [ + { + "name": "", + "type": "uint256" + } + ], + "payable": false, + "stateMutability": "view", + "type": "function" + }, + { + "constant": false, + "inputs": [ + { + "name": "_from", + "type": "address" + }, + { + "name": "_to", + "type": "address" + }, + { + "name": "_value", + "type": "uint256" + } + ], + "name": "transferFrom", + "outputs": [ + { + "name": "", + "type": "bool" + } + ], + "payable": false, + "stateMutability": "nonpayable", + "type": "function" + }, + { + "constant": true, + "inputs": [], + "name": "decimals", + "outputs": [ + { + "name": "", + "type": "uint8" + } + ], + "payable": false, + "stateMutability": "view", + "type": "function" + }, + { + "constant": true, + "inputs": [ + { + "name": "_owner", + "type": "address" + } + ], + "name": "balanceOf", + "outputs": [ + { + "name": "balance", + "type": "uint256" + } + ], + "payable": false, + "stateMutability": "view", + "type": "function" + }, + { + "constant": true, + "inputs": [], + "name": "symbol", + "outputs": [ + { + "name": "", + "type": "string" + } + ], + "payable": false, + "stateMutability": "view", + "type": "function" + }, + { + "constant": false, + "inputs": [ + { + "name": "_to", + "type": "address" + }, + { + "name": "_value", + "type": "uint256" + } + ], + "name": "transfer", + "outputs": [ + { + "name": "", + "type": "bool" + } + ], + "payable": false, + "stateMutability": "nonpayable", + "type": "function" + }, + { + "constant": true, + "inputs": [ + { + "name": "_owner", + "type": "address" + }, + { + "name": "_spender", + "type": "address" + } + ], + "name": 
"allowance", + "outputs": [ + { + "name": "", + "type": "uint256" + } + ], + "payable": false, + "stateMutability": "view", + "type": "function" + }, + { + "payable": true, + "stateMutability": "payable", + "type": "fallback" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "name": "owner", + "type": "address" + }, + { + "indexed": true, + "name": "spender", + "type": "address" + }, + { + "indexed": false, + "name": "value", + "type": "uint256" + } + ], + "name": "Approval", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "name": "from", + "type": "address" + }, + { + "indexed": true, + "name": "to", + "type": "address" + }, + { + "indexed": false, + "name": "value", + "type": "uint256" + } + ], + "name": "Transfer", + "type": "event" + } +] diff --git a/transforms/uniswap/abis/ERC20NameBytes.json b/transforms/uniswap/abis/ERC20NameBytes.json new file mode 100644 index 00000000000..2d3c877a8c0 --- /dev/null +++ b/transforms/uniswap/abis/ERC20NameBytes.json @@ -0,0 +1,17 @@ +[ + { + "constant": true, + "inputs": [], + "name": "name", + "outputs": [ + { + "internalType": "bytes32", + "name": "", + "type": "bytes32" + } + ], + "payable": false, + "stateMutability": "view", + "type": "function" + } +] diff --git a/transforms/uniswap/abis/ERC20SymbolBytes.json b/transforms/uniswap/abis/ERC20SymbolBytes.json new file mode 100644 index 00000000000..a76d6163668 --- /dev/null +++ b/transforms/uniswap/abis/ERC20SymbolBytes.json @@ -0,0 +1,17 @@ +[ + { + "constant": true, + "inputs": [], + "name": "symbol", + "outputs": [ + { + "internalType": "bytes32", + "name": "", + "type": "bytes32" + } + ], + "payable": false, + "stateMutability": "view", + "type": "function" + } +] diff --git a/transforms/uniswap/abis/NonfungiblePositionManager.json b/transforms/uniswap/abis/NonfungiblePositionManager.json new file mode 100644 index 00000000000..29cef050d4e --- /dev/null +++ 
b/transforms/uniswap/abis/NonfungiblePositionManager.json @@ -0,0 +1,1193 @@ +[ + { + "inputs": [ + { + "internalType": "address", + "name": "_factory", + "type": "address" + }, + { + "internalType": "address", + "name": "_WETH9", + "type": "address" + }, + { + "internalType": "address", + "name": "_tokenDescriptor_", + "type": "address" + } + ], + "stateMutability": "nonpayable", + "type": "constructor" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "owner", + "type": "address" + }, + { + "indexed": true, + "internalType": "address", + "name": "approved", + "type": "address" + }, + { + "indexed": true, + "internalType": "uint256", + "name": "tokenId", + "type": "uint256" + } + ], + "name": "Approval", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "owner", + "type": "address" + }, + { + "indexed": true, + "internalType": "address", + "name": "operator", + "type": "address" + }, + { + "indexed": false, + "internalType": "bool", + "name": "approved", + "type": "bool" + } + ], + "name": "ApprovalForAll", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "uint256", + "name": "tokenId", + "type": "uint256" + }, + { + "indexed": false, + "internalType": "address", + "name": "recipient", + "type": "address" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "amount0", + "type": "uint256" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "amount1", + "type": "uint256" + } + ], + "name": "Collect", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "uint256", + "name": "tokenId", + "type": "uint256" + }, + { + "indexed": false, + "internalType": "uint128", + "name": "liquidity", + "type": "uint128" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "amount0", + "type": "uint256" + 
}, + { + "indexed": false, + "internalType": "uint256", + "name": "amount1", + "type": "uint256" + } + ], + "name": "DecreaseLiquidity", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "uint256", + "name": "tokenId", + "type": "uint256" + }, + { + "indexed": false, + "internalType": "uint128", + "name": "liquidity", + "type": "uint128" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "amount0", + "type": "uint256" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "amount1", + "type": "uint256" + } + ], + "name": "IncreaseLiquidity", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "from", + "type": "address" + }, + { + "indexed": true, + "internalType": "address", + "name": "to", + "type": "address" + }, + { + "indexed": true, + "internalType": "uint256", + "name": "tokenId", + "type": "uint256" + } + ], + "name": "Transfer", + "type": "event" + }, + { + "inputs": [], + "name": "DOMAIN_SEPARATOR", + "outputs": [ + { + "internalType": "bytes32", + "name": "", + "type": "bytes32" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "PERMIT_TYPEHASH", + "outputs": [ + { + "internalType": "bytes32", + "name": "", + "type": "bytes32" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "WETH9", + "outputs": [ + { + "internalType": "address", + "name": "", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "to", + "type": "address" + }, + { + "internalType": "uint256", + "name": "tokenId", + "type": "uint256" + } + ], + "name": "approve", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "owner", + "type": "address" + } + ], + "name": 
"balanceOf", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "baseURI", + "outputs": [ + { + "internalType": "string", + "name": "", + "type": "string" + } + ], + "stateMutability": "pure", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "tokenId", + "type": "uint256" + } + ], + "name": "burn", + "outputs": [], + "stateMutability": "payable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "tokenId", + "type": "uint256" + }, + { + "internalType": "address", + "name": "recipient", + "type": "address" + }, + { + "internalType": "uint128", + "name": "amount0Max", + "type": "uint128" + }, + { + "internalType": "uint128", + "name": "amount1Max", + "type": "uint128" + } + ], + "name": "collect", + "outputs": [ + { + "internalType": "uint256", + "name": "amount0", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "amount1", + "type": "uint256" + } + ], + "stateMutability": "payable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "tokenA", + "type": "address" + }, + { + "internalType": "address", + "name": "tokenB", + "type": "address" + }, + { + "internalType": "uint24", + "name": "fee", + "type": "uint24" + }, + { + "internalType": "uint160", + "name": "sqrtPriceX96", + "type": "uint160" + } + ], + "name": "createAndInitializePoolIfNecessary", + "outputs": [ + { + "internalType": "address", + "name": "pool", + "type": "address" + } + ], + "stateMutability": "payable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "tokenId", + "type": "uint256" + }, + { + "internalType": "uint128", + "name": "liquidity", + "type": "uint128" + }, + { + "internalType": "uint256", + "name": "amount0Min", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "amount1Min", + 
"type": "uint256" + }, + { + "internalType": "uint256", + "name": "deadline", + "type": "uint256" + } + ], + "name": "decreaseLiquidity", + "outputs": [ + { + "internalType": "uint256", + "name": "amount0", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "amount1", + "type": "uint256" + } + ], + "stateMutability": "payable", + "type": "function" + }, + { + "inputs": [], + "name": "factory", + "outputs": [ + { + "internalType": "address", + "name": "", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "tokenId", + "type": "uint256" + } + ], + "name": "getApproved", + "outputs": [ + { + "internalType": "address", + "name": "", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "tokenId", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "amount0Desired", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "amount1Desired", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "amount0Min", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "amount1Min", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "deadline", + "type": "uint256" + } + ], + "name": "increaseLiquidity", + "outputs": [ + { + "internalType": "uint128", + "name": "liquidity", + "type": "uint128" + }, + { + "internalType": "uint256", + "name": "amount0", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "amount1", + "type": "uint256" + } + ], + "stateMutability": "payable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "owner", + "type": "address" + }, + { + "internalType": "address", + "name": "operator", + "type": "address" + } + ], + "name": "isApprovedForAll", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + 
"stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "components": [ + { + "internalType": "address", + "name": "token0", + "type": "address" + }, + { + "internalType": "address", + "name": "token1", + "type": "address" + }, + { + "internalType": "uint24", + "name": "fee", + "type": "uint24" + }, + { + "internalType": "int24", + "name": "tickLower", + "type": "int24" + }, + { + "internalType": "int24", + "name": "tickUpper", + "type": "int24" + }, + { + "internalType": "uint256", + "name": "amount0Desired", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "amount1Desired", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "amount0Min", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "amount1Min", + "type": "uint256" + }, + { + "internalType": "address", + "name": "recipient", + "type": "address" + }, + { + "internalType": "uint256", + "name": "deadline", + "type": "uint256" + } + ], + "internalType": "struct INonfungiblePositionManager.MintParams", + "name": "params", + "type": "tuple" + } + ], + "name": "mint", + "outputs": [ + { + "internalType": "uint256", + "name": "tokenId", + "type": "uint256" + }, + { + "internalType": "uint128", + "name": "liquidity", + "type": "uint128" + }, + { + "internalType": "uint256", + "name": "amount0", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "amount1", + "type": "uint256" + } + ], + "stateMutability": "payable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "bytes[]", + "name": "data", + "type": "bytes[]" + } + ], + "name": "multicall", + "outputs": [ + { + "internalType": "bytes[]", + "name": "results", + "type": "bytes[]" + } + ], + "stateMutability": "payable", + "type": "function" + }, + { + "inputs": [], + "name": "name", + "outputs": [ + { + "internalType": "string", + "name": "", + "type": "string" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + 
"internalType": "uint256", + "name": "tokenId", + "type": "uint256" + } + ], + "name": "ownerOf", + "outputs": [ + { + "internalType": "address", + "name": "", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "spender", + "type": "address" + }, + { + "internalType": "uint256", + "name": "tokenId", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "deadline", + "type": "uint256" + }, + { + "internalType": "uint8", + "name": "v", + "type": "uint8" + }, + { + "internalType": "bytes32", + "name": "r", + "type": "bytes32" + }, + { + "internalType": "bytes32", + "name": "s", + "type": "bytes32" + } + ], + "name": "permit", + "outputs": [], + "stateMutability": "payable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "tokenId", + "type": "uint256" + } + ], + "name": "positions", + "outputs": [ + { + "internalType": "uint96", + "name": "nonce", + "type": "uint96" + }, + { + "internalType": "address", + "name": "operator", + "type": "address" + }, + { + "internalType": "address", + "name": "token0", + "type": "address" + }, + { + "internalType": "address", + "name": "token1", + "type": "address" + }, + { + "internalType": "uint24", + "name": "fee", + "type": "uint24" + }, + { + "internalType": "int24", + "name": "tickLower", + "type": "int24" + }, + { + "internalType": "int24", + "name": "tickUpper", + "type": "int24" + }, + { + "internalType": "uint128", + "name": "liquidity", + "type": "uint128" + }, + { + "internalType": "uint256", + "name": "feeGrowthInside0LastX128", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "feeGrowthInside1LastX128", + "type": "uint256" + }, + { + "internalType": "uint128", + "name": "tokensOwed0", + "type": "uint128" + }, + { + "internalType": "uint128", + "name": "tokensOwed1", + "type": "uint128" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + 
"inputs": [ + { + "internalType": "address", + "name": "from", + "type": "address" + }, + { + "internalType": "address", + "name": "to", + "type": "address" + }, + { + "internalType": "uint256", + "name": "tokenId", + "type": "uint256" + } + ], + "name": "safeTransferFrom", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "from", + "type": "address" + }, + { + "internalType": "address", + "name": "to", + "type": "address" + }, + { + "internalType": "uint256", + "name": "tokenId", + "type": "uint256" + }, + { + "internalType": "bytes", + "name": "_data", + "type": "bytes" + } + ], + "name": "safeTransferFrom", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "token", + "type": "address" + }, + { + "internalType": "uint256", + "name": "value", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "deadline", + "type": "uint256" + }, + { + "internalType": "uint8", + "name": "v", + "type": "uint8" + }, + { + "internalType": "bytes32", + "name": "r", + "type": "bytes32" + }, + { + "internalType": "bytes32", + "name": "s", + "type": "bytes32" + } + ], + "name": "selfPermit", + "outputs": [], + "stateMutability": "payable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "token", + "type": "address" + }, + { + "internalType": "uint256", + "name": "nonce", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "expiry", + "type": "uint256" + }, + { + "internalType": "uint8", + "name": "v", + "type": "uint8" + }, + { + "internalType": "bytes32", + "name": "r", + "type": "bytes32" + }, + { + "internalType": "bytes32", + "name": "s", + "type": "bytes32" + } + ], + "name": "selfPermitAllowed", + "outputs": [], + "stateMutability": "payable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "token", 
+ "type": "address" + }, + { + "internalType": "uint256", + "name": "nonce", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "expiry", + "type": "uint256" + }, + { + "internalType": "uint8", + "name": "v", + "type": "uint8" + }, + { + "internalType": "bytes32", + "name": "r", + "type": "bytes32" + }, + { + "internalType": "bytes32", + "name": "s", + "type": "bytes32" + } + ], + "name": "selfPermitAllowedIfNecessary", + "outputs": [], + "stateMutability": "payable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "token", + "type": "address" + }, + { + "internalType": "uint256", + "name": "value", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "deadline", + "type": "uint256" + }, + { + "internalType": "uint8", + "name": "v", + "type": "uint8" + }, + { + "internalType": "bytes32", + "name": "r", + "type": "bytes32" + }, + { + "internalType": "bytes32", + "name": "s", + "type": "bytes32" + } + ], + "name": "selfPermitIfNecessary", + "outputs": [], + "stateMutability": "payable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "operator", + "type": "address" + }, + { + "internalType": "bool", + "name": "approved", + "type": "bool" + } + ], + "name": "setApprovalForAll", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "bytes4", + "name": "interfaceId", + "type": "bytes4" + } + ], + "name": "supportsInterface", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "token", + "type": "address" + }, + { + "internalType": "uint256", + "name": "amountMinimum", + "type": "uint256" + }, + { + "internalType": "address", + "name": "recipient", + "type": "address" + } + ], + "name": "sweepToken", + "outputs": [], + "stateMutability": "payable", + 
"type": "function" + }, + { + "inputs": [], + "name": "symbol", + "outputs": [ + { + "internalType": "string", + "name": "", + "type": "string" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "index", + "type": "uint256" + } + ], + "name": "tokenByIndex", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "owner", + "type": "address" + }, + { + "internalType": "uint256", + "name": "index", + "type": "uint256" + } + ], + "name": "tokenOfOwnerByIndex", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "tokenId", + "type": "uint256" + } + ], + "name": "tokenURI", + "outputs": [ + { + "internalType": "string", + "name": "", + "type": "string" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "totalSupply", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "from", + "type": "address" + }, + { + "internalType": "address", + "name": "to", + "type": "address" + }, + { + "internalType": "uint256", + "name": "tokenId", + "type": "uint256" + } + ], + "name": "transferFrom", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "amount0Owed", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "amount1Owed", + "type": "uint256" + }, + { + "internalType": "bytes", + "name": "data", + "type": "bytes" + } + ], + "name": "uniswapV3MintCallback", + "outputs": [], + "stateMutability": "nonpayable", + 
"type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "amountMinimum", + "type": "uint256" + }, + { + "internalType": "address", + "name": "recipient", + "type": "address" + } + ], + "name": "unwrapWETH9", + "outputs": [], + "stateMutability": "payable", + "type": "function" + }, + { + "stateMutability": "payable", + "type": "receive" + } +] \ No newline at end of file diff --git a/transforms/uniswap/abis/factory.json b/transforms/uniswap/abis/factory.json new file mode 100644 index 00000000000..250321295db --- /dev/null +++ b/transforms/uniswap/abis/factory.json @@ -0,0 +1,198 @@ +[ + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "uint24", + "name": "fee", + "type": "uint24" + }, + { + "indexed": true, + "internalType": "int24", + "name": "tickSpacing", + "type": "int24" + } + ], + "name": "FeeAmountEnabled", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "oldOwner", + "type": "address" + }, + { + "indexed": true, + "internalType": "address", + "name": "newOwner", + "type": "address" + } + ], + "name": "OwnerChanged", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "token0", + "type": "address" + }, + { + "indexed": true, + "internalType": "address", + "name": "token1", + "type": "address" + }, + { + "indexed": true, + "internalType": "uint24", + "name": "fee", + "type": "uint24" + }, + { + "indexed": false, + "internalType": "int24", + "name": "tickSpacing", + "type": "int24" + }, + { + "indexed": false, + "internalType": "address", + "name": "pool", + "type": "address" + } + ], + "name": "PoolCreated", + "type": "event" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "tokenA", + "type": "address" + }, + { + "internalType": "address", + "name": "tokenB", + "type": "address" + }, + { + "internalType": "uint24", + "name": "fee", + 
"type": "uint24" + } + ], + "name": "createPool", + "outputs": [ + { + "internalType": "address", + "name": "pool", + "type": "address" + } + ], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint24", + "name": "fee", + "type": "uint24" + }, + { + "internalType": "int24", + "name": "tickSpacing", + "type": "int24" + } + ], + "name": "enableFeeAmount", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint24", + "name": "fee", + "type": "uint24" + } + ], + "name": "feeAmountTickSpacing", + "outputs": [ + { + "internalType": "int24", + "name": "", + "type": "int24" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "tokenA", + "type": "address" + }, + { + "internalType": "address", + "name": "tokenB", + "type": "address" + }, + { + "internalType": "uint24", + "name": "fee", + "type": "uint24" + } + ], + "name": "getPool", + "outputs": [ + { + "internalType": "address", + "name": "pool", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "owner", + "outputs": [ + { + "internalType": "address", + "name": "", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "_owner", + "type": "address" + } + ], + "name": "setOwner", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + } +] \ No newline at end of file diff --git a/transforms/uniswap/abis/pool.json b/transforms/uniswap/abis/pool.json new file mode 100644 index 00000000000..c87d64ddf86 --- /dev/null +++ b/transforms/uniswap/abis/pool.json @@ -0,0 +1,988 @@ +[ + { + "inputs": [], + "stateMutability": "nonpayable", + "type": "constructor" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "owner", 
+ "type": "address" + }, + { + "indexed": true, + "internalType": "int24", + "name": "tickLower", + "type": "int24" + }, + { + "indexed": true, + "internalType": "int24", + "name": "tickUpper", + "type": "int24" + }, + { + "indexed": false, + "internalType": "uint128", + "name": "amount", + "type": "uint128" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "amount0", + "type": "uint256" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "amount1", + "type": "uint256" + } + ], + "name": "Burn", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "owner", + "type": "address" + }, + { + "indexed": false, + "internalType": "address", + "name": "recipient", + "type": "address" + }, + { + "indexed": true, + "internalType": "int24", + "name": "tickLower", + "type": "int24" + }, + { + "indexed": true, + "internalType": "int24", + "name": "tickUpper", + "type": "int24" + }, + { + "indexed": false, + "internalType": "uint128", + "name": "amount0", + "type": "uint128" + }, + { + "indexed": false, + "internalType": "uint128", + "name": "amount1", + "type": "uint128" + } + ], + "name": "Collect", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "sender", + "type": "address" + }, + { + "indexed": true, + "internalType": "address", + "name": "recipient", + "type": "address" + }, + { + "indexed": false, + "internalType": "uint128", + "name": "amount0", + "type": "uint128" + }, + { + "indexed": false, + "internalType": "uint128", + "name": "amount1", + "type": "uint128" + } + ], + "name": "CollectProtocol", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "sender", + "type": "address" + }, + { + "indexed": true, + "internalType": "address", + "name": "recipient", + "type": "address" + }, + { + "indexed": false, + 
"internalType": "uint256", + "name": "amount0", + "type": "uint256" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "amount1", + "type": "uint256" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "paid0", + "type": "uint256" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "paid1", + "type": "uint256" + } + ], + "name": "Flash", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": false, + "internalType": "uint16", + "name": "observationCardinalityNextOld", + "type": "uint16" + }, + { + "indexed": false, + "internalType": "uint16", + "name": "observationCardinalityNextNew", + "type": "uint16" + } + ], + "name": "IncreaseObservationCardinalityNext", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": false, + "internalType": "uint160", + "name": "sqrtPriceX96", + "type": "uint160" + }, + { + "indexed": false, + "internalType": "int24", + "name": "tick", + "type": "int24" + } + ], + "name": "Initialize", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": false, + "internalType": "address", + "name": "sender", + "type": "address" + }, + { + "indexed": true, + "internalType": "address", + "name": "owner", + "type": "address" + }, + { + "indexed": true, + "internalType": "int24", + "name": "tickLower", + "type": "int24" + }, + { + "indexed": true, + "internalType": "int24", + "name": "tickUpper", + "type": "int24" + }, + { + "indexed": false, + "internalType": "uint128", + "name": "amount", + "type": "uint128" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "amount0", + "type": "uint256" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "amount1", + "type": "uint256" + } + ], + "name": "Mint", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": false, + "internalType": "uint8", + "name": "feeProtocol0Old", + "type": "uint8" + }, + { + "indexed": false, + 
"internalType": "uint8", + "name": "feeProtocol1Old", + "type": "uint8" + }, + { + "indexed": false, + "internalType": "uint8", + "name": "feeProtocol0New", + "type": "uint8" + }, + { + "indexed": false, + "internalType": "uint8", + "name": "feeProtocol1New", + "type": "uint8" + } + ], + "name": "SetFeeProtocol", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "sender", + "type": "address" + }, + { + "indexed": true, + "internalType": "address", + "name": "recipient", + "type": "address" + }, + { + "indexed": false, + "internalType": "int256", + "name": "amount0", + "type": "int256" + }, + { + "indexed": false, + "internalType": "int256", + "name": "amount1", + "type": "int256" + }, + { + "indexed": false, + "internalType": "uint160", + "name": "sqrtPriceX96", + "type": "uint160" + }, + { + "indexed": false, + "internalType": "uint128", + "name": "liquidity", + "type": "uint128" + }, + { + "indexed": false, + "internalType": "int24", + "name": "tick", + "type": "int24" + } + ], + "name": "Swap", + "type": "event" + }, + { + "inputs": [ + { + "internalType": "int24", + "name": "tickLower", + "type": "int24" + }, + { + "internalType": "int24", + "name": "tickUpper", + "type": "int24" + }, + { + "internalType": "uint128", + "name": "amount", + "type": "uint128" + } + ], + "name": "burn", + "outputs": [ + { + "internalType": "uint256", + "name": "amount0", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "amount1", + "type": "uint256" + } + ], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "recipient", + "type": "address" + }, + { + "internalType": "int24", + "name": "tickLower", + "type": "int24" + }, + { + "internalType": "int24", + "name": "tickUpper", + "type": "int24" + }, + { + "internalType": "uint128", + "name": "amount0Requested", + "type": "uint128" + }, + { + "internalType": "uint128", + 
"name": "amount1Requested", + "type": "uint128" + } + ], + "name": "collect", + "outputs": [ + { + "internalType": "uint128", + "name": "amount0", + "type": "uint128" + }, + { + "internalType": "uint128", + "name": "amount1", + "type": "uint128" + } + ], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "recipient", + "type": "address" + }, + { + "internalType": "uint128", + "name": "amount0Requested", + "type": "uint128" + }, + { + "internalType": "uint128", + "name": "amount1Requested", + "type": "uint128" + } + ], + "name": "collectProtocol", + "outputs": [ + { + "internalType": "uint128", + "name": "amount0", + "type": "uint128" + }, + { + "internalType": "uint128", + "name": "amount1", + "type": "uint128" + } + ], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [], + "name": "factory", + "outputs": [ + { + "internalType": "address", + "name": "", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "fee", + "outputs": [ + { + "internalType": "uint24", + "name": "", + "type": "uint24" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "feeGrowthGlobal0X128", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "feeGrowthGlobal1X128", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "recipient", + "type": "address" + }, + { + "internalType": "uint256", + "name": "amount0", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "amount1", + "type": "uint256" + }, + { + "internalType": "bytes", + "name": "data", + "type": "bytes" + } + ], + "name": "flash", + "outputs": [], 
+ "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint16", + "name": "observationCardinalityNext", + "type": "uint16" + } + ], + "name": "increaseObservationCardinalityNext", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint160", + "name": "sqrtPriceX96", + "type": "uint160" + } + ], + "name": "initialize", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [], + "name": "liquidity", + "outputs": [ + { + "internalType": "uint128", + "name": "", + "type": "uint128" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "maxLiquidityPerTick", + "outputs": [ + { + "internalType": "uint128", + "name": "", + "type": "uint128" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "recipient", + "type": "address" + }, + { + "internalType": "int24", + "name": "tickLower", + "type": "int24" + }, + { + "internalType": "int24", + "name": "tickUpper", + "type": "int24" + }, + { + "internalType": "uint128", + "name": "amount", + "type": "uint128" + }, + { + "internalType": "bytes", + "name": "data", + "type": "bytes" + } + ], + "name": "mint", + "outputs": [ + { + "internalType": "uint256", + "name": "amount0", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "amount1", + "type": "uint256" + } + ], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "index", + "type": "uint256" + } + ], + "name": "observations", + "outputs": [ + { + "internalType": "uint32", + "name": "blockTimestamp", + "type": "uint32" + }, + { + "internalType": "int56", + "name": "tickCumulative", + "type": "int56" + }, + { + "internalType": "uint160", + "name": "secondsPerLiquidityCumulativeX128", + "type": "uint160" + }, + { + "internalType": 
"bool", + "name": "initialized", + "type": "bool" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint32[]", + "name": "secondsAgos", + "type": "uint32[]" + } + ], + "name": "observe", + "outputs": [ + { + "internalType": "int56[]", + "name": "tickCumulatives", + "type": "int56[]" + }, + { + "internalType": "uint160[]", + "name": "secondsPerLiquidityCumulativeX128s", + "type": "uint160[]" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "bytes32", + "name": "key", + "type": "bytes32" + } + ], + "name": "positions", + "outputs": [ + { + "internalType": "uint128", + "name": "_liquidity", + "type": "uint128" + }, + { + "internalType": "uint256", + "name": "feeGrowthInside0LastX128", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "feeGrowthInside1LastX128", + "type": "uint256" + }, + { + "internalType": "uint128", + "name": "tokensOwed0", + "type": "uint128" + }, + { + "internalType": "uint128", + "name": "tokensOwed1", + "type": "uint128" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "protocolFees", + "outputs": [ + { + "internalType": "uint128", + "name": "token0", + "type": "uint128" + }, + { + "internalType": "uint128", + "name": "token1", + "type": "uint128" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint8", + "name": "feeProtocol0", + "type": "uint8" + }, + { + "internalType": "uint8", + "name": "feeProtocol1", + "type": "uint8" + } + ], + "name": "setFeeProtocol", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [], + "name": "slot0", + "outputs": [ + { + "internalType": "uint160", + "name": "sqrtPriceX96", + "type": "uint160" + }, + { + "internalType": "int24", + "name": "tick", + "type": "int24" + }, + { + "internalType": "uint16", + "name": "observationIndex", + "type": 
"uint16" + }, + { + "internalType": "uint16", + "name": "observationCardinality", + "type": "uint16" + }, + { + "internalType": "uint16", + "name": "observationCardinalityNext", + "type": "uint16" + }, + { + "internalType": "uint8", + "name": "feeProtocol", + "type": "uint8" + }, + { + "internalType": "bool", + "name": "unlocked", + "type": "bool" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "int24", + "name": "tickLower", + "type": "int24" + }, + { + "internalType": "int24", + "name": "tickUpper", + "type": "int24" + } + ], + "name": "snapshotCumulativesInside", + "outputs": [ + { + "internalType": "int56", + "name": "tickCumulativeInside", + "type": "int56" + }, + { + "internalType": "uint160", + "name": "secondsPerLiquidityInsideX128", + "type": "uint160" + }, + { + "internalType": "uint32", + "name": "secondsInside", + "type": "uint32" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "recipient", + "type": "address" + }, + { + "internalType": "bool", + "name": "zeroForOne", + "type": "bool" + }, + { + "internalType": "int256", + "name": "amountSpecified", + "type": "int256" + }, + { + "internalType": "uint160", + "name": "sqrtPriceLimitX96", + "type": "uint160" + }, + { + "internalType": "bytes", + "name": "data", + "type": "bytes" + } + ], + "name": "swap", + "outputs": [ + { + "internalType": "int256", + "name": "amount0", + "type": "int256" + }, + { + "internalType": "int256", + "name": "amount1", + "type": "int256" + } + ], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "int16", + "name": "wordPosition", + "type": "int16" + } + ], + "name": "tickBitmap", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "tickSpacing", + "outputs": [ + { + 
"internalType": "int24", + "name": "", + "type": "int24" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "int24", + "name": "tick", + "type": "int24" + } + ], + "name": "ticks", + "outputs": [ + { + "internalType": "uint128", + "name": "liquidityGross", + "type": "uint128" + }, + { + "internalType": "int128", + "name": "liquidityNet", + "type": "int128" + }, + { + "internalType": "uint256", + "name": "feeGrowthOutside0X128", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "feeGrowthOutside1X128", + "type": "uint256" + }, + { + "internalType": "int56", + "name": "tickCumulativeOutside", + "type": "int56" + }, + { + "internalType": "uint160", + "name": "secondsPerLiquidityOutsideX128", + "type": "uint160" + }, + { + "internalType": "uint32", + "name": "secondsOutside", + "type": "uint32" + }, + { + "internalType": "bool", + "name": "initialized", + "type": "bool" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "token0", + "outputs": [ + { + "internalType": "address", + "name": "", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "token1", + "outputs": [ + { + "internalType": "address", + "name": "", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + } +] \ No newline at end of file diff --git a/transforms/uniswap/build.rs b/transforms/uniswap/build.rs new file mode 100644 index 00000000000..c528cda2b46 --- /dev/null +++ b/transforms/uniswap/build.rs @@ -0,0 +1,26 @@ +use anyhow::{Ok, Result}; +use substreams_ethereum::Abigen; + +fn main() -> Result<(), anyhow::Error> { + Abigen::new("pool", "abis/pool.json")? + .generate()? + .write_to_file("src/abi/pool.rs")?; + Abigen::new("erc20", "abis/ERC20.json")? + .generate()? + .write_to_file("src/abi/erc20.rs")?; + Abigen::new("factory", "abis/factory.json")? + .generate()? 
+ .write_to_file("src/abi/factory.rs")?; + Abigen::new("positionmanager", "abis/NonfungiblePositionManager.json")? + .generate()? + .write_to_file("src/abi/positionmanager.rs")?; + + tonic_build::configure() + .protoc_arg("--experimental_allow_proto3_optional") + .include_file("mod.rs") + .out_dir("src/proto") + .compile(&["proto/uniswap.proto"], &["proto"]) + .expect("Failed to compile Uniswap proto(s)"); + + Ok(()) +} diff --git a/transforms/uniswap/proto/uniswap.proto b/transforms/uniswap/proto/uniswap.proto new file mode 100644 index 00000000000..77b56b6deab --- /dev/null +++ b/transforms/uniswap/proto/uniswap.proto @@ -0,0 +1,140 @@ +syntax = "proto3"; + +package edgeandnode.uniswap.v1; + +import "google/protobuf/any.proto"; + +enum EventType{ + // Factory + POOL_CREATED = 0; + + // Position Manager + INCREASE_LIQUIDITY = 1; + DECREASE_LIQUIDITY = 2; + COLLECT = 3; + TRANSFER = 4; + + // Pool + INITIALIZE = 5; + SWAP = 6; + MINT = 7; + BURN = 8; + FLASH = 9; +} + +message Events { + repeated Event events = 1; +} + +// Every address is stored as hex string. +message Event { + // Owner points to the address that originated this event + // The PoolCreated will set this to factory, which is what we can use + // to track different factories with compatible events. + string owner = 1; + EventType type = 2; + google.protobuf.Any event = 3; + string address= 4; + string tx_hash = 5; + string tx_gas_used = 6; + string tx_gas_price = 7; + // This duplicates data (as opposed to adding this data to the head) but AssemblyScript does + // not support closures and so using the data is not super easy if it's in the header so I'll + // leave it here. 
+ int32 block_number = 8; + string block_timestamp = 9; + oneof event2 { + PoolCreated poolcreated = 10; + IncreaseLiquidity increaseliquidity = 11; + DecreaseLiquidity decreaseliquidity = 12; + Collect collect = 13; + Transfer transfer = 14; + Initialize initialize = 15; + Swap swap = 16; + Mint mint = 17; + Burn burn = 18; + Flash flash = 19; + } +} + +// Factory +message PoolCreated { + string token0 = 1; + string token1 = 2; + string fee = 3; + string tick_spacing = 4; + string pool = 5; +} + +// Position Manager +message IncreaseLiquidity { + string token_id = 1; + string liquidity = 2; + string amount0 = 3; + string amount1 = 4; +} + +message DecreaseLiquidity { + string token_id = 1; + string liquidity = 2; + string amount0 = 3; + string amount1 = 4; +} + +message Collect { + string token_id = 1; + string recipient = 2; + string amount0 = 3; + string amount1 = 4; +} +message Transfer { + string from = 1; + string to = 2; + string token_id = 3; +} + +// Pool +message Initialize { + string sqrt_price_x96 = 1; + string tick = 2; +} +message Swap { + string sender = 1; + string recipient = 2; + string amount0 = 3; + string amount1 = 4; + string sqrt_price_x96 = 5; + string liquidity = 6; + string tick = 7; + int32 log_index = 8; + string transaction_from = 9; +} +message Mint { + string sender = 1; + string owner = 2; + string tick_lower = 3; + string tick_upper = 4; + string amount = 5; + string amount0 = 6; + string amount1 = 7; + int32 log_index = 8; + string transaction_from = 9; +} +message Burn { + string owner = 1; + string tick_lower = 2; + string tick_upper = 3; + string amount = 4; + string amount0 = 5; + string amount1 = 6; + int32 log_index = 7; + string transaction_from = 8; +} +message Flash { + string sender = 1; + string recipient = 2; + string amount0 = 3; + string amount1 = 4; + string paid0 = 5; + string paid1 = 6; +} diff --git a/transforms/uniswap/src/abi/erc20.rs b/transforms/uniswap/src/abi/erc20.rs new file mode 100644 index 
00000000000..26b94a49b12 --- /dev/null +++ b/transforms/uniswap/src/abi/erc20.rs @@ -0,0 +1,1249 @@ + const INTERNAL_ERR: &'static str = "`ethabi_derive` internal error"; + /// Contract's functions. + #[allow(dead_code, unused_imports, unused_variables)] + pub mod functions { + use super::INTERNAL_ERR; + #[derive(Debug, Clone, PartialEq)] + pub struct Allowance { + pub owner: Vec, + pub spender: Vec, + } + impl Allowance { + const METHOD_ID: [u8; 4] = [221u8, 98u8, 237u8, 62u8]; + pub fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + let maybe_data = call.input.get(4..); + if maybe_data.is_none() { + return Err("no data to decode".to_string()); + } + let mut values = ethabi::decode( + &[ethabi::ParamType::Address, ethabi::ParamType::Address], + maybe_data.unwrap(), + ) + .map_err(|e| format!("unable to decode call.input: {:?}", e))?; + values.reverse(); + Ok(Self { + owner: values + .pop() + .expect(INTERNAL_ERR) + .into_address() + .expect(INTERNAL_ERR) + .as_bytes() + .to_vec(), + spender: values + .pop() + .expect(INTERNAL_ERR) + .into_address() + .expect(INTERNAL_ERR) + .as_bytes() + .to_vec(), + }) + } + pub fn encode(&self) -> Vec { + let data = ethabi::encode( + &[ + ethabi::Token::Address(ethabi::Address::from_slice(&self.owner)), + ethabi::Token::Address( + ethabi::Address::from_slice(&self.spender), + ), + ], + ); + let mut encoded = Vec::with_capacity(4 + data.len()); + encoded.extend(Self::METHOD_ID); + encoded.extend(data); + encoded + } + pub fn output_call( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::output(call.return_data.as_ref()) + } + pub fn output(data: &[u8]) -> Result { + let mut values = ethabi::decode( + &[ethabi::ParamType::Uint(256usize)], + data.as_ref(), + ) + .map_err(|e| format!("unable to decode output data: {:?}", e))?; + Ok({ + let mut v = [0 as u8; 32]; + values + .pop() + .expect("one output data should have existed") + .into_uint() + .expect(INTERNAL_ERR) + 
.to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }) + } + pub fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + match call.input.get(0..4) { + Some(signature) => Self::METHOD_ID == signature, + None => false, + } + } + pub fn call(&self, address: Vec) -> Option { + use substreams_ethereum::pb::eth::rpc; + let rpc_calls = rpc::RpcCalls { + calls: vec![ + rpc::RpcCall { to_addr : address, data : self.encode(), } + ], + }; + let responses = substreams_ethereum::rpc::eth_call(&rpc_calls).responses; + let response = responses + .get(0) + .expect("one response should have existed"); + if response.failed { + return None; + } + match Self::output(response.raw.as_ref()) { + Ok(data) => Some(data), + Err(err) => { + use substreams_ethereum::Function; + substreams::log::info!( + "Call output for function `{}` failed to decode with error: {}", + Self::NAME, err + ); + None + } + } + } + } + impl substreams_ethereum::Function for Allowance { + const NAME: &'static str = "allowance"; + fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + Self::match_call(call) + } + fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::decode(call) + } + fn encode(&self) -> Vec { + self.encode() + } + } + impl substreams_ethereum::rpc::RPCDecodable + for Allowance { + fn output(data: &[u8]) -> Result { + Self::output(data) + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct Approve { + pub spender: Vec, + pub value: substreams::scalar::BigInt, + } + impl Approve { + const METHOD_ID: [u8; 4] = [9u8, 94u8, 167u8, 179u8]; + pub fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + let maybe_data = call.input.get(4..); + if maybe_data.is_none() { + return Err("no data to decode".to_string()); + } + let mut values = ethabi::decode( + &[ethabi::ParamType::Address, ethabi::ParamType::Uint(256usize)], + maybe_data.unwrap(), + ) + .map_err(|e| format!("unable 
to decode call.input: {:?}", e))?; + values.reverse(); + Ok(Self { + spender: values + .pop() + .expect(INTERNAL_ERR) + .into_address() + .expect(INTERNAL_ERR) + .as_bytes() + .to_vec(), + value: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + }) + } + pub fn encode(&self) -> Vec { + let data = ethabi::encode( + &[ + ethabi::Token::Address( + ethabi::Address::from_slice(&self.spender), + ), + ethabi::Token::Uint( + ethabi::Uint::from_big_endian( + match self.value.clone().to_bytes_be() { + (num_bigint::Sign::Plus, bytes) => bytes, + (num_bigint::Sign::NoSign, bytes) => bytes, + (num_bigint::Sign::Minus, _) => { + panic!("negative numbers are not supported") + } + } + .as_slice(), + ), + ), + ], + ); + let mut encoded = Vec::with_capacity(4 + data.len()); + encoded.extend(Self::METHOD_ID); + encoded.extend(data); + encoded + } + pub fn output_call( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::output(call.return_data.as_ref()) + } + pub fn output(data: &[u8]) -> Result { + let mut values = ethabi::decode( + &[ethabi::ParamType::Bool], + data.as_ref(), + ) + .map_err(|e| format!("unable to decode output data: {:?}", e))?; + Ok( + values + .pop() + .expect("one output data should have existed") + .into_bool() + .expect(INTERNAL_ERR), + ) + } + pub fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + match call.input.get(0..4) { + Some(signature) => Self::METHOD_ID == signature, + None => false, + } + } + pub fn call(&self, address: Vec) -> Option { + use substreams_ethereum::pb::eth::rpc; + let rpc_calls = rpc::RpcCalls { + calls: vec![ + rpc::RpcCall { to_addr : address, data : self.encode(), } + ], + }; + let responses = substreams_ethereum::rpc::eth_call(&rpc_calls).responses; + let response = responses + .get(0) + .expect("one response should have 
existed"); + if response.failed { + return None; + } + match Self::output(response.raw.as_ref()) { + Ok(data) => Some(data), + Err(err) => { + use substreams_ethereum::Function; + substreams::log::info!( + "Call output for function `{}` failed to decode with error: {}", + Self::NAME, err + ); + None + } + } + } + } + impl substreams_ethereum::Function for Approve { + const NAME: &'static str = "approve"; + fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + Self::match_call(call) + } + fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::decode(call) + } + fn encode(&self) -> Vec { + self.encode() + } + } + impl substreams_ethereum::rpc::RPCDecodable for Approve { + fn output(data: &[u8]) -> Result { + Self::output(data) + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct BalanceOf { + pub owner: Vec, + } + impl BalanceOf { + const METHOD_ID: [u8; 4] = [112u8, 160u8, 130u8, 49u8]; + pub fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + let maybe_data = call.input.get(4..); + if maybe_data.is_none() { + return Err("no data to decode".to_string()); + } + let mut values = ethabi::decode( + &[ethabi::ParamType::Address], + maybe_data.unwrap(), + ) + .map_err(|e| format!("unable to decode call.input: {:?}", e))?; + values.reverse(); + Ok(Self { + owner: values + .pop() + .expect(INTERNAL_ERR) + .into_address() + .expect(INTERNAL_ERR) + .as_bytes() + .to_vec(), + }) + } + pub fn encode(&self) -> Vec { + let data = ethabi::encode( + &[ethabi::Token::Address(ethabi::Address::from_slice(&self.owner))], + ); + let mut encoded = Vec::with_capacity(4 + data.len()); + encoded.extend(Self::METHOD_ID); + encoded.extend(data); + encoded + } + pub fn output_call( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::output(call.return_data.as_ref()) + } + pub fn output(data: &[u8]) -> Result { + let mut values = ethabi::decode( + &[ethabi::ParamType::Uint(256usize)], + 
data.as_ref(), + ) + .map_err(|e| format!("unable to decode output data: {:?}", e))?; + Ok({ + let mut v = [0 as u8; 32]; + values + .pop() + .expect("one output data should have existed") + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }) + } + pub fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + match call.input.get(0..4) { + Some(signature) => Self::METHOD_ID == signature, + None => false, + } + } + pub fn call(&self, address: Vec) -> Option { + use substreams_ethereum::pb::eth::rpc; + let rpc_calls = rpc::RpcCalls { + calls: vec![ + rpc::RpcCall { to_addr : address, data : self.encode(), } + ], + }; + let responses = substreams_ethereum::rpc::eth_call(&rpc_calls).responses; + let response = responses + .get(0) + .expect("one response should have existed"); + if response.failed { + return None; + } + match Self::output(response.raw.as_ref()) { + Ok(data) => Some(data), + Err(err) => { + use substreams_ethereum::Function; + substreams::log::info!( + "Call output for function `{}` failed to decode with error: {}", + Self::NAME, err + ); + None + } + } + } + } + impl substreams_ethereum::Function for BalanceOf { + const NAME: &'static str = "balanceOf"; + fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + Self::match_call(call) + } + fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::decode(call) + } + fn encode(&self) -> Vec { + self.encode() + } + } + impl substreams_ethereum::rpc::RPCDecodable + for BalanceOf { + fn output(data: &[u8]) -> Result { + Self::output(data) + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct Decimals {} + impl Decimals { + const METHOD_ID: [u8; 4] = [49u8, 60u8, 229u8, 103u8]; + pub fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Ok(Self {}) + } + pub fn encode(&self) -> Vec { + let data = ethabi::encode(&[]); + let mut encoded = 
Vec::with_capacity(4 + data.len()); + encoded.extend(Self::METHOD_ID); + encoded.extend(data); + encoded + } + pub fn output_call( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::output(call.return_data.as_ref()) + } + pub fn output(data: &[u8]) -> Result { + let mut values = ethabi::decode( + &[ethabi::ParamType::Uint(8usize)], + data.as_ref(), + ) + .map_err(|e| format!("unable to decode output data: {:?}", e))?; + Ok({ + let mut v = [0 as u8; 32]; + values + .pop() + .expect("one output data should have existed") + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }) + } + pub fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + match call.input.get(0..4) { + Some(signature) => Self::METHOD_ID == signature, + None => false, + } + } + pub fn call(&self, address: Vec) -> Option { + use substreams_ethereum::pb::eth::rpc; + let rpc_calls = rpc::RpcCalls { + calls: vec![ + rpc::RpcCall { to_addr : address, data : self.encode(), } + ], + }; + let responses = substreams_ethereum::rpc::eth_call(&rpc_calls).responses; + let response = responses + .get(0) + .expect("one response should have existed"); + if response.failed { + return None; + } + match Self::output(response.raw.as_ref()) { + Ok(data) => Some(data), + Err(err) => { + use substreams_ethereum::Function; + substreams::log::info!( + "Call output for function `{}` failed to decode with error: {}", + Self::NAME, err + ); + None + } + } + } + } + impl substreams_ethereum::Function for Decimals { + const NAME: &'static str = "decimals"; + fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + Self::match_call(call) + } + fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::decode(call) + } + fn encode(&self) -> Vec { + self.encode() + } + } + impl substreams_ethereum::rpc::RPCDecodable + for Decimals { + fn output(data: &[u8]) -> Result { + 
Self::output(data) + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct Name {} + impl Name { + const METHOD_ID: [u8; 4] = [6u8, 253u8, 222u8, 3u8]; + pub fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Ok(Self {}) + } + pub fn encode(&self) -> Vec { + let data = ethabi::encode(&[]); + let mut encoded = Vec::with_capacity(4 + data.len()); + encoded.extend(Self::METHOD_ID); + encoded.extend(data); + encoded + } + pub fn output_call( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::output(call.return_data.as_ref()) + } + pub fn output(data: &[u8]) -> Result { + let mut values = ethabi::decode( + &[ethabi::ParamType::String], + data.as_ref(), + ) + .map_err(|e| format!("unable to decode output data: {:?}", e))?; + Ok( + values + .pop() + .expect("one output data should have existed") + .into_string() + .expect(INTERNAL_ERR), + ) + } + pub fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + match call.input.get(0..4) { + Some(signature) => Self::METHOD_ID == signature, + None => false, + } + } + pub fn call(&self, address: Vec) -> Option { + use substreams_ethereum::pb::eth::rpc; + let rpc_calls = rpc::RpcCalls { + calls: vec![ + rpc::RpcCall { to_addr : address, data : self.encode(), } + ], + }; + let responses = substreams_ethereum::rpc::eth_call(&rpc_calls).responses; + let response = responses + .get(0) + .expect("one response should have existed"); + if response.failed { + return None; + } + match Self::output(response.raw.as_ref()) { + Ok(data) => Some(data), + Err(err) => { + use substreams_ethereum::Function; + substreams::log::info!( + "Call output for function `{}` failed to decode with error: {}", + Self::NAME, err + ); + None + } + } + } + } + impl substreams_ethereum::Function for Name { + const NAME: &'static str = "name"; + fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + Self::match_call(call) + } + fn decode( + call: 
&substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::decode(call) + } + fn encode(&self) -> Vec { + self.encode() + } + } + impl substreams_ethereum::rpc::RPCDecodable for Name { + fn output(data: &[u8]) -> Result { + Self::output(data) + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct Symbol {} + impl Symbol { + const METHOD_ID: [u8; 4] = [149u8, 216u8, 155u8, 65u8]; + pub fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Ok(Self {}) + } + pub fn encode(&self) -> Vec { + let data = ethabi::encode(&[]); + let mut encoded = Vec::with_capacity(4 + data.len()); + encoded.extend(Self::METHOD_ID); + encoded.extend(data); + encoded + } + pub fn output_call( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::output(call.return_data.as_ref()) + } + pub fn output(data: &[u8]) -> Result { + let mut values = ethabi::decode( + &[ethabi::ParamType::String], + data.as_ref(), + ) + .map_err(|e| format!("unable to decode output data: {:?}", e))?; + Ok( + values + .pop() + .expect("one output data should have existed") + .into_string() + .expect(INTERNAL_ERR), + ) + } + pub fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + match call.input.get(0..4) { + Some(signature) => Self::METHOD_ID == signature, + None => false, + } + } + pub fn call(&self, address: Vec) -> Option { + use substreams_ethereum::pb::eth::rpc; + let rpc_calls = rpc::RpcCalls { + calls: vec![ + rpc::RpcCall { to_addr : address, data : self.encode(), } + ], + }; + let responses = substreams_ethereum::rpc::eth_call(&rpc_calls).responses; + let response = responses + .get(0) + .expect("one response should have existed"); + if response.failed { + return None; + } + match Self::output(response.raw.as_ref()) { + Ok(data) => Some(data), + Err(err) => { + use substreams_ethereum::Function; + substreams::log::info!( + "Call output for function `{}` failed to decode with error: {}", + Self::NAME, err + ); + None + } + } + } + } + 
impl substreams_ethereum::Function for Symbol { + const NAME: &'static str = "symbol"; + fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + Self::match_call(call) + } + fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::decode(call) + } + fn encode(&self) -> Vec { + self.encode() + } + } + impl substreams_ethereum::rpc::RPCDecodable for Symbol { + fn output(data: &[u8]) -> Result { + Self::output(data) + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct TotalSupply {} + impl TotalSupply { + const METHOD_ID: [u8; 4] = [24u8, 22u8, 13u8, 221u8]; + pub fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Ok(Self {}) + } + pub fn encode(&self) -> Vec { + let data = ethabi::encode(&[]); + let mut encoded = Vec::with_capacity(4 + data.len()); + encoded.extend(Self::METHOD_ID); + encoded.extend(data); + encoded + } + pub fn output_call( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::output(call.return_data.as_ref()) + } + pub fn output(data: &[u8]) -> Result { + let mut values = ethabi::decode( + &[ethabi::ParamType::Uint(256usize)], + data.as_ref(), + ) + .map_err(|e| format!("unable to decode output data: {:?}", e))?; + Ok({ + let mut v = [0 as u8; 32]; + values + .pop() + .expect("one output data should have existed") + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }) + } + pub fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + match call.input.get(0..4) { + Some(signature) => Self::METHOD_ID == signature, + None => false, + } + } + pub fn call(&self, address: Vec) -> Option { + use substreams_ethereum::pb::eth::rpc; + let rpc_calls = rpc::RpcCalls { + calls: vec![ + rpc::RpcCall { to_addr : address, data : self.encode(), } + ], + }; + let responses = substreams_ethereum::rpc::eth_call(&rpc_calls).responses; + let response = responses + .get(0) + 
.expect("one response should have existed"); + if response.failed { + return None; + } + match Self::output(response.raw.as_ref()) { + Ok(data) => Some(data), + Err(err) => { + use substreams_ethereum::Function; + substreams::log::info!( + "Call output for function `{}` failed to decode with error: {}", + Self::NAME, err + ); + None + } + } + } + } + impl substreams_ethereum::Function for TotalSupply { + const NAME: &'static str = "totalSupply"; + fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + Self::match_call(call) + } + fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::decode(call) + } + fn encode(&self) -> Vec { + self.encode() + } + } + impl substreams_ethereum::rpc::RPCDecodable + for TotalSupply { + fn output(data: &[u8]) -> Result { + Self::output(data) + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct Transfer { + pub to: Vec, + pub value: substreams::scalar::BigInt, + } + impl Transfer { + const METHOD_ID: [u8; 4] = [169u8, 5u8, 156u8, 187u8]; + pub fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + let maybe_data = call.input.get(4..); + if maybe_data.is_none() { + return Err("no data to decode".to_string()); + } + let mut values = ethabi::decode( + &[ethabi::ParamType::Address, ethabi::ParamType::Uint(256usize)], + maybe_data.unwrap(), + ) + .map_err(|e| format!("unable to decode call.input: {:?}", e))?; + values.reverse(); + Ok(Self { + to: values + .pop() + .expect(INTERNAL_ERR) + .into_address() + .expect(INTERNAL_ERR) + .as_bytes() + .to_vec(), + value: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + }) + } + pub fn encode(&self) -> Vec { + let data = ethabi::encode( + &[ + ethabi::Token::Address(ethabi::Address::from_slice(&self.to)), + ethabi::Token::Uint( + ethabi::Uint::from_big_endian( + 
match self.value.clone().to_bytes_be() { + (num_bigint::Sign::Plus, bytes) => bytes, + (num_bigint::Sign::NoSign, bytes) => bytes, + (num_bigint::Sign::Minus, _) => { + panic!("negative numbers are not supported") + } + } + .as_slice(), + ), + ), + ], + ); + let mut encoded = Vec::with_capacity(4 + data.len()); + encoded.extend(Self::METHOD_ID); + encoded.extend(data); + encoded + } + pub fn output_call( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::output(call.return_data.as_ref()) + } + pub fn output(data: &[u8]) -> Result { + let mut values = ethabi::decode( + &[ethabi::ParamType::Bool], + data.as_ref(), + ) + .map_err(|e| format!("unable to decode output data: {:?}", e))?; + Ok( + values + .pop() + .expect("one output data should have existed") + .into_bool() + .expect(INTERNAL_ERR), + ) + } + pub fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + match call.input.get(0..4) { + Some(signature) => Self::METHOD_ID == signature, + None => false, + } + } + pub fn call(&self, address: Vec) -> Option { + use substreams_ethereum::pb::eth::rpc; + let rpc_calls = rpc::RpcCalls { + calls: vec![ + rpc::RpcCall { to_addr : address, data : self.encode(), } + ], + }; + let responses = substreams_ethereum::rpc::eth_call(&rpc_calls).responses; + let response = responses + .get(0) + .expect("one response should have existed"); + if response.failed { + return None; + } + match Self::output(response.raw.as_ref()) { + Ok(data) => Some(data), + Err(err) => { + use substreams_ethereum::Function; + substreams::log::info!( + "Call output for function `{}` failed to decode with error: {}", + Self::NAME, err + ); + None + } + } + } + } + impl substreams_ethereum::Function for Transfer { + const NAME: &'static str = "transfer"; + fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + Self::match_call(call) + } + fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::decode(call) + } + fn 
encode(&self) -> Vec { + self.encode() + } + } + impl substreams_ethereum::rpc::RPCDecodable for Transfer { + fn output(data: &[u8]) -> Result { + Self::output(data) + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct TransferFrom { + pub from: Vec, + pub to: Vec, + pub value: substreams::scalar::BigInt, + } + impl TransferFrom { + const METHOD_ID: [u8; 4] = [35u8, 184u8, 114u8, 221u8]; + pub fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + let maybe_data = call.input.get(4..); + if maybe_data.is_none() { + return Err("no data to decode".to_string()); + } + let mut values = ethabi::decode( + &[ + ethabi::ParamType::Address, + ethabi::ParamType::Address, + ethabi::ParamType::Uint(256usize), + ], + maybe_data.unwrap(), + ) + .map_err(|e| format!("unable to decode call.input: {:?}", e))?; + values.reverse(); + Ok(Self { + from: values + .pop() + .expect(INTERNAL_ERR) + .into_address() + .expect(INTERNAL_ERR) + .as_bytes() + .to_vec(), + to: values + .pop() + .expect(INTERNAL_ERR) + .into_address() + .expect(INTERNAL_ERR) + .as_bytes() + .to_vec(), + value: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + }) + } + pub fn encode(&self) -> Vec { + let data = ethabi::encode( + &[ + ethabi::Token::Address(ethabi::Address::from_slice(&self.from)), + ethabi::Token::Address(ethabi::Address::from_slice(&self.to)), + ethabi::Token::Uint( + ethabi::Uint::from_big_endian( + match self.value.clone().to_bytes_be() { + (num_bigint::Sign::Plus, bytes) => bytes, + (num_bigint::Sign::NoSign, bytes) => bytes, + (num_bigint::Sign::Minus, _) => { + panic!("negative numbers are not supported") + } + } + .as_slice(), + ), + ), + ], + ); + let mut encoded = Vec::with_capacity(4 + data.len()); + encoded.extend(Self::METHOD_ID); + encoded.extend(data); + encoded + } + pub fn output_call( + call: 
&substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::output(call.return_data.as_ref()) + } + pub fn output(data: &[u8]) -> Result { + let mut values = ethabi::decode( + &[ethabi::ParamType::Bool], + data.as_ref(), + ) + .map_err(|e| format!("unable to decode output data: {:?}", e))?; + Ok( + values + .pop() + .expect("one output data should have existed") + .into_bool() + .expect(INTERNAL_ERR), + ) + } + pub fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + match call.input.get(0..4) { + Some(signature) => Self::METHOD_ID == signature, + None => false, + } + } + pub fn call(&self, address: Vec) -> Option { + use substreams_ethereum::pb::eth::rpc; + let rpc_calls = rpc::RpcCalls { + calls: vec![ + rpc::RpcCall { to_addr : address, data : self.encode(), } + ], + }; + let responses = substreams_ethereum::rpc::eth_call(&rpc_calls).responses; + let response = responses + .get(0) + .expect("one response should have existed"); + if response.failed { + return None; + } + match Self::output(response.raw.as_ref()) { + Ok(data) => Some(data), + Err(err) => { + use substreams_ethereum::Function; + substreams::log::info!( + "Call output for function `{}` failed to decode with error: {}", + Self::NAME, err + ); + None + } + } + } + } + impl substreams_ethereum::Function for TransferFrom { + const NAME: &'static str = "transferFrom"; + fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + Self::match_call(call) + } + fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::decode(call) + } + fn encode(&self) -> Vec { + self.encode() + } + } + impl substreams_ethereum::rpc::RPCDecodable for TransferFrom { + fn output(data: &[u8]) -> Result { + Self::output(data) + } + } + } + /// Contract's events. 
+ #[allow(dead_code, unused_imports, unused_variables)] + pub mod events { + use super::INTERNAL_ERR; + #[derive(Debug, Clone, PartialEq)] + pub struct Approval { + pub owner: Vec, + pub spender: Vec, + pub value: substreams::scalar::BigInt, + } + impl Approval { + const TOPIC_ID: [u8; 32] = [ + 140u8, + 91u8, + 225u8, + 229u8, + 235u8, + 236u8, + 125u8, + 91u8, + 209u8, + 79u8, + 113u8, + 66u8, + 125u8, + 30u8, + 132u8, + 243u8, + 221u8, + 3u8, + 20u8, + 192u8, + 247u8, + 178u8, + 41u8, + 30u8, + 91u8, + 32u8, + 10u8, + 200u8, + 199u8, + 195u8, + 185u8, + 37u8, + ]; + pub fn match_log(log: &substreams_ethereum::pb::eth::v2::Log) -> bool { + if log.topics.len() != 3usize { + return false; + } + if log.data.len() != 32usize { + return false; + } + return log.topics.get(0).expect("bounds already checked").as_ref() + == Self::TOPIC_ID; + } + pub fn decode( + log: &substreams_ethereum::pb::eth::v2::Log, + ) -> Result { + let mut values = ethabi::decode( + &[ethabi::ParamType::Uint(256usize)], + log.data.as_ref(), + ) + .map_err(|e| format!("unable to decode log.data: {:?}", e))?; + values.reverse(); + Ok(Self { + owner: ethabi::decode( + &[ethabi::ParamType::Address], + log.topics[1usize].as_ref(), + ) + .map_err(|e| format!( + "unable to decode param 'owner' from topic of type 'address': {:?}", + e + ))? + .pop() + .expect(INTERNAL_ERR) + .into_address() + .expect(INTERNAL_ERR) + .as_bytes() + .to_vec(), + spender: ethabi::decode( + &[ethabi::ParamType::Address], + log.topics[2usize].as_ref(), + ) + .map_err(|e| format!( + "unable to decode param 'spender' from topic of type 'address': {:?}", + e + ))? 
+ .pop() + .expect(INTERNAL_ERR) + .into_address() + .expect(INTERNAL_ERR) + .as_bytes() + .to_vec(), + value: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + }) + } + } + impl substreams_ethereum::Event for Approval { + const NAME: &'static str = "Approval"; + fn match_log(log: &substreams_ethereum::pb::eth::v2::Log) -> bool { + Self::match_log(log) + } + fn decode( + log: &substreams_ethereum::pb::eth::v2::Log, + ) -> Result { + Self::decode(log) + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct Transfer { + pub from: Vec, + pub to: Vec, + pub value: substreams::scalar::BigInt, + } + impl Transfer { + const TOPIC_ID: [u8; 32] = [ + 221u8, + 242u8, + 82u8, + 173u8, + 27u8, + 226u8, + 200u8, + 155u8, + 105u8, + 194u8, + 176u8, + 104u8, + 252u8, + 55u8, + 141u8, + 170u8, + 149u8, + 43u8, + 167u8, + 241u8, + 99u8, + 196u8, + 161u8, + 22u8, + 40u8, + 245u8, + 90u8, + 77u8, + 245u8, + 35u8, + 179u8, + 239u8, + ]; + pub fn match_log(log: &substreams_ethereum::pb::eth::v2::Log) -> bool { + if log.topics.len() != 3usize { + return false; + } + if log.data.len() != 32usize { + return false; + } + return log.topics.get(0).expect("bounds already checked").as_ref() + == Self::TOPIC_ID; + } + pub fn decode( + log: &substreams_ethereum::pb::eth::v2::Log, + ) -> Result { + let mut values = ethabi::decode( + &[ethabi::ParamType::Uint(256usize)], + log.data.as_ref(), + ) + .map_err(|e| format!("unable to decode log.data: {:?}", e))?; + values.reverse(); + Ok(Self { + from: ethabi::decode( + &[ethabi::ParamType::Address], + log.topics[1usize].as_ref(), + ) + .map_err(|e| format!( + "unable to decode param 'from' from topic of type 'address': {:?}", + e + ))? 
+ .pop() + .expect(INTERNAL_ERR) + .into_address() + .expect(INTERNAL_ERR) + .as_bytes() + .to_vec(), + to: ethabi::decode( + &[ethabi::ParamType::Address], + log.topics[2usize].as_ref(), + ) + .map_err(|e| format!( + "unable to decode param 'to' from topic of type 'address': {:?}", + e + ))? + .pop() + .expect(INTERNAL_ERR) + .into_address() + .expect(INTERNAL_ERR) + .as_bytes() + .to_vec(), + value: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + }) + } + } + impl substreams_ethereum::Event for Transfer { + const NAME: &'static str = "Transfer"; + fn match_log(log: &substreams_ethereum::pb::eth::v2::Log) -> bool { + Self::match_log(log) + } + fn decode( + log: &substreams_ethereum::pb::eth::v2::Log, + ) -> Result { + Self::decode(log) + } + } + } \ No newline at end of file diff --git a/transforms/uniswap/src/abi/factory.rs b/transforms/uniswap/src/abi/factory.rs new file mode 100644 index 00000000000..81386231e3d --- /dev/null +++ b/transforms/uniswap/src/abi/factory.rs @@ -0,0 +1,1028 @@ + const INTERNAL_ERR: &'static str = "`ethabi_derive` internal error"; + /// Contract's functions. 
+ #[allow(dead_code, unused_imports, unused_variables)] + pub mod functions { + use super::INTERNAL_ERR; + #[derive(Debug, Clone, PartialEq)] + pub struct CreatePool { + pub token_a: Vec, + pub token_b: Vec, + pub fee: substreams::scalar::BigInt, + } + impl CreatePool { + const METHOD_ID: [u8; 4] = [161u8, 103u8, 18u8, 149u8]; + pub fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + let maybe_data = call.input.get(4..); + if maybe_data.is_none() { + return Err("no data to decode".to_string()); + } + let mut values = ethabi::decode( + &[ + ethabi::ParamType::Address, + ethabi::ParamType::Address, + ethabi::ParamType::Uint(24usize), + ], + maybe_data.unwrap(), + ) + .map_err(|e| format!("unable to decode call.input: {:?}", e))?; + values.reverse(); + Ok(Self { + token_a: values + .pop() + .expect(INTERNAL_ERR) + .into_address() + .expect(INTERNAL_ERR) + .as_bytes() + .to_vec(), + token_b: values + .pop() + .expect(INTERNAL_ERR) + .into_address() + .expect(INTERNAL_ERR) + .as_bytes() + .to_vec(), + fee: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + }) + } + pub fn encode(&self) -> Vec { + let data = ethabi::encode( + &[ + ethabi::Token::Address( + ethabi::Address::from_slice(&self.token_a), + ), + ethabi::Token::Address( + ethabi::Address::from_slice(&self.token_b), + ), + ethabi::Token::Uint( + ethabi::Uint::from_big_endian( + match self.fee.clone().to_bytes_be() { + (num_bigint::Sign::Plus, bytes) => bytes, + (num_bigint::Sign::NoSign, bytes) => bytes, + (num_bigint::Sign::Minus, _) => { + panic!("negative numbers are not supported") + } + } + .as_slice(), + ), + ), + ], + ); + let mut encoded = Vec::with_capacity(4 + data.len()); + encoded.extend(Self::METHOD_ID); + encoded.extend(data); + encoded + } + pub fn output_call( + call: &substreams_ethereum::pb::eth::v2::Call, + ) 
-> Result, String> { + Self::output(call.return_data.as_ref()) + } + pub fn output(data: &[u8]) -> Result, String> { + let mut values = ethabi::decode( + &[ethabi::ParamType::Address], + data.as_ref(), + ) + .map_err(|e| format!("unable to decode output data: {:?}", e))?; + Ok( + values + .pop() + .expect("one output data should have existed") + .into_address() + .expect(INTERNAL_ERR) + .as_bytes() + .to_vec(), + ) + } + pub fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + match call.input.get(0..4) { + Some(signature) => Self::METHOD_ID == signature, + None => false, + } + } + pub fn call(&self, address: Vec) -> Option> { + use substreams_ethereum::pb::eth::rpc; + let rpc_calls = rpc::RpcCalls { + calls: vec![ + rpc::RpcCall { to_addr : address, data : self.encode(), } + ], + }; + let responses = substreams_ethereum::rpc::eth_call(&rpc_calls).responses; + let response = responses + .get(0) + .expect("one response should have existed"); + if response.failed { + return None; + } + match Self::output(response.raw.as_ref()) { + Ok(data) => Some(data), + Err(err) => { + use substreams_ethereum::Function; + substreams::log::info!( + "Call output for function `{}` failed to decode with error: {}", + Self::NAME, err + ); + None + } + } + } + } + impl substreams_ethereum::Function for CreatePool { + const NAME: &'static str = "createPool"; + fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + Self::match_call(call) + } + fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::decode(call) + } + fn encode(&self) -> Vec { + self.encode() + } + } + impl substreams_ethereum::rpc::RPCDecodable> for CreatePool { + fn output(data: &[u8]) -> Result, String> { + Self::output(data) + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct EnableFeeAmount { + pub fee: substreams::scalar::BigInt, + pub tick_spacing: substreams::scalar::BigInt, + } + impl EnableFeeAmount { + const METHOD_ID: [u8; 4] = [138u8, 
124u8, 25u8, 95u8]; + pub fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + let maybe_data = call.input.get(4..); + if maybe_data.is_none() { + return Err("no data to decode".to_string()); + } + let mut values = ethabi::decode( + &[ + ethabi::ParamType::Uint(24usize), + ethabi::ParamType::Int(24usize), + ], + maybe_data.unwrap(), + ) + .map_err(|e| format!("unable to decode call.input: {:?}", e))?; + values.reverse(); + Ok(Self { + fee: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + tick_spacing: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_int() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_signed_bytes_be(&v) + }, + }) + } + pub fn encode(&self) -> Vec { + let data = ethabi::encode( + &[ + ethabi::Token::Uint( + ethabi::Uint::from_big_endian( + match self.fee.clone().to_bytes_be() { + (num_bigint::Sign::Plus, bytes) => bytes, + (num_bigint::Sign::NoSign, bytes) => bytes, + (num_bigint::Sign::Minus, _) => { + panic!("negative numbers are not supported") + } + } + .as_slice(), + ), + ), + { + let non_full_signed_bytes = self + .tick_spacing + .to_signed_bytes_be(); + let full_signed_bytes_init = if non_full_signed_bytes[0] + & 0x80 == 0x80 + { + 0xff + } else { + 0x00 + }; + let mut full_signed_bytes = [full_signed_bytes_init + as u8; 32]; + non_full_signed_bytes + .into_iter() + .rev() + .enumerate() + .for_each(|(i, byte)| full_signed_bytes[31 - i] = byte); + ethabi::Token::Int( + ethabi::Int::from_big_endian(full_signed_bytes.as_ref()), + ) + }, + ], + ); + let mut encoded = Vec::with_capacity(4 + data.len()); + encoded.extend(Self::METHOD_ID); + encoded.extend(data); + encoded + } + pub fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + match call.input.get(0..4) { + 
Some(signature) => Self::METHOD_ID == signature, + None => false, + } + } + } + impl substreams_ethereum::Function for EnableFeeAmount { + const NAME: &'static str = "enableFeeAmount"; + fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + Self::match_call(call) + } + fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::decode(call) + } + fn encode(&self) -> Vec { + self.encode() + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct FeeAmountTickSpacing { + pub fee: substreams::scalar::BigInt, + } + impl FeeAmountTickSpacing { + const METHOD_ID: [u8; 4] = [34u8, 175u8, 204u8, 203u8]; + pub fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + let maybe_data = call.input.get(4..); + if maybe_data.is_none() { + return Err("no data to decode".to_string()); + } + let mut values = ethabi::decode( + &[ethabi::ParamType::Uint(24usize)], + maybe_data.unwrap(), + ) + .map_err(|e| format!("unable to decode call.input: {:?}", e))?; + values.reverse(); + Ok(Self { + fee: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + }) + } + pub fn encode(&self) -> Vec { + let data = ethabi::encode( + &[ + ethabi::Token::Uint( + ethabi::Uint::from_big_endian( + match self.fee.clone().to_bytes_be() { + (num_bigint::Sign::Plus, bytes) => bytes, + (num_bigint::Sign::NoSign, bytes) => bytes, + (num_bigint::Sign::Minus, _) => { + panic!("negative numbers are not supported") + } + } + .as_slice(), + ), + ), + ], + ); + let mut encoded = Vec::with_capacity(4 + data.len()); + encoded.extend(Self::METHOD_ID); + encoded.extend(data); + encoded + } + pub fn output_call( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::output(call.return_data.as_ref()) + } + pub fn output(data: &[u8]) -> Result { + let mut values = ethabi::decode( + 
&[ethabi::ParamType::Int(24usize)], + data.as_ref(), + ) + .map_err(|e| format!("unable to decode output data: {:?}", e))?; + Ok({ + let mut v = [0 as u8; 32]; + values + .pop() + .expect("one output data should have existed") + .into_int() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_signed_bytes_be(&v) + }) + } + pub fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + match call.input.get(0..4) { + Some(signature) => Self::METHOD_ID == signature, + None => false, + } + } + pub fn call(&self, address: Vec) -> Option { + use substreams_ethereum::pb::eth::rpc; + let rpc_calls = rpc::RpcCalls { + calls: vec![ + rpc::RpcCall { to_addr : address, data : self.encode(), } + ], + }; + let responses = substreams_ethereum::rpc::eth_call(&rpc_calls).responses; + let response = responses + .get(0) + .expect("one response should have existed"); + if response.failed { + return None; + } + match Self::output(response.raw.as_ref()) { + Ok(data) => Some(data), + Err(err) => { + use substreams_ethereum::Function; + substreams::log::info!( + "Call output for function `{}` failed to decode with error: {}", + Self::NAME, err + ); + None + } + } + } + } + impl substreams_ethereum::Function for FeeAmountTickSpacing { + const NAME: &'static str = "feeAmountTickSpacing"; + fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + Self::match_call(call) + } + fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::decode(call) + } + fn encode(&self) -> Vec { + self.encode() + } + } + impl substreams_ethereum::rpc::RPCDecodable + for FeeAmountTickSpacing { + fn output(data: &[u8]) -> Result { + Self::output(data) + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct GetPool { + pub token_a: Vec, + pub token_b: Vec, + pub fee: substreams::scalar::BigInt, + } + impl GetPool { + const METHOD_ID: [u8; 4] = [22u8, 152u8, 238u8, 130u8]; + pub fn decode( + call: 
&substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + let maybe_data = call.input.get(4..); + if maybe_data.is_none() { + return Err("no data to decode".to_string()); + } + let mut values = ethabi::decode( + &[ + ethabi::ParamType::Address, + ethabi::ParamType::Address, + ethabi::ParamType::Uint(24usize), + ], + maybe_data.unwrap(), + ) + .map_err(|e| format!("unable to decode call.input: {:?}", e))?; + values.reverse(); + Ok(Self { + token_a: values + .pop() + .expect(INTERNAL_ERR) + .into_address() + .expect(INTERNAL_ERR) + .as_bytes() + .to_vec(), + token_b: values + .pop() + .expect(INTERNAL_ERR) + .into_address() + .expect(INTERNAL_ERR) + .as_bytes() + .to_vec(), + fee: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + }) + } + pub fn encode(&self) -> Vec { + let data = ethabi::encode( + &[ + ethabi::Token::Address( + ethabi::Address::from_slice(&self.token_a), + ), + ethabi::Token::Address( + ethabi::Address::from_slice(&self.token_b), + ), + ethabi::Token::Uint( + ethabi::Uint::from_big_endian( + match self.fee.clone().to_bytes_be() { + (num_bigint::Sign::Plus, bytes) => bytes, + (num_bigint::Sign::NoSign, bytes) => bytes, + (num_bigint::Sign::Minus, _) => { + panic!("negative numbers are not supported") + } + } + .as_slice(), + ), + ), + ], + ); + let mut encoded = Vec::with_capacity(4 + data.len()); + encoded.extend(Self::METHOD_ID); + encoded.extend(data); + encoded + } + pub fn output_call( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result, String> { + Self::output(call.return_data.as_ref()) + } + pub fn output(data: &[u8]) -> Result, String> { + let mut values = ethabi::decode( + &[ethabi::ParamType::Address], + data.as_ref(), + ) + .map_err(|e| format!("unable to decode output data: {:?}", e))?; + Ok( + values + .pop() + .expect("one output data should have existed") + 
.into_address() + .expect(INTERNAL_ERR) + .as_bytes() + .to_vec(), + ) + } + pub fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + match call.input.get(0..4) { + Some(signature) => Self::METHOD_ID == signature, + None => false, + } + } + pub fn call(&self, address: Vec) -> Option> { + use substreams_ethereum::pb::eth::rpc; + let rpc_calls = rpc::RpcCalls { + calls: vec![ + rpc::RpcCall { to_addr : address, data : self.encode(), } + ], + }; + let responses = substreams_ethereum::rpc::eth_call(&rpc_calls).responses; + let response = responses + .get(0) + .expect("one response should have existed"); + if response.failed { + return None; + } + match Self::output(response.raw.as_ref()) { + Ok(data) => Some(data), + Err(err) => { + use substreams_ethereum::Function; + substreams::log::info!( + "Call output for function `{}` failed to decode with error: {}", + Self::NAME, err + ); + None + } + } + } + } + impl substreams_ethereum::Function for GetPool { + const NAME: &'static str = "getPool"; + fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + Self::match_call(call) + } + fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::decode(call) + } + fn encode(&self) -> Vec { + self.encode() + } + } + impl substreams_ethereum::rpc::RPCDecodable> for GetPool { + fn output(data: &[u8]) -> Result, String> { + Self::output(data) + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct Owner {} + impl Owner { + const METHOD_ID: [u8; 4] = [141u8, 165u8, 203u8, 91u8]; + pub fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Ok(Self {}) + } + pub fn encode(&self) -> Vec { + let data = ethabi::encode(&[]); + let mut encoded = Vec::with_capacity(4 + data.len()); + encoded.extend(Self::METHOD_ID); + encoded.extend(data); + encoded + } + pub fn output_call( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result, String> { + Self::output(call.return_data.as_ref()) + } + pub fn 
output(data: &[u8]) -> Result, String> { + let mut values = ethabi::decode( + &[ethabi::ParamType::Address], + data.as_ref(), + ) + .map_err(|e| format!("unable to decode output data: {:?}", e))?; + Ok( + values + .pop() + .expect("one output data should have existed") + .into_address() + .expect(INTERNAL_ERR) + .as_bytes() + .to_vec(), + ) + } + pub fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + match call.input.get(0..4) { + Some(signature) => Self::METHOD_ID == signature, + None => false, + } + } + pub fn call(&self, address: Vec) -> Option> { + use substreams_ethereum::pb::eth::rpc; + let rpc_calls = rpc::RpcCalls { + calls: vec![ + rpc::RpcCall { to_addr : address, data : self.encode(), } + ], + }; + let responses = substreams_ethereum::rpc::eth_call(&rpc_calls).responses; + let response = responses + .get(0) + .expect("one response should have existed"); + if response.failed { + return None; + } + match Self::output(response.raw.as_ref()) { + Ok(data) => Some(data), + Err(err) => { + use substreams_ethereum::Function; + substreams::log::info!( + "Call output for function `{}` failed to decode with error: {}", + Self::NAME, err + ); + None + } + } + } + } + impl substreams_ethereum::Function for Owner { + const NAME: &'static str = "owner"; + fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + Self::match_call(call) + } + fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::decode(call) + } + fn encode(&self) -> Vec { + self.encode() + } + } + impl substreams_ethereum::rpc::RPCDecodable> for Owner { + fn output(data: &[u8]) -> Result, String> { + Self::output(data) + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct SetOwner { + pub owner: Vec, + } + impl SetOwner { + const METHOD_ID: [u8; 4] = [19u8, 175u8, 64u8, 53u8]; + pub fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + let maybe_data = call.input.get(4..); + if maybe_data.is_none() { + return 
Err("no data to decode".to_string()); + } + let mut values = ethabi::decode( + &[ethabi::ParamType::Address], + maybe_data.unwrap(), + ) + .map_err(|e| format!("unable to decode call.input: {:?}", e))?; + values.reverse(); + Ok(Self { + owner: values + .pop() + .expect(INTERNAL_ERR) + .into_address() + .expect(INTERNAL_ERR) + .as_bytes() + .to_vec(), + }) + } + pub fn encode(&self) -> Vec { + let data = ethabi::encode( + &[ethabi::Token::Address(ethabi::Address::from_slice(&self.owner))], + ); + let mut encoded = Vec::with_capacity(4 + data.len()); + encoded.extend(Self::METHOD_ID); + encoded.extend(data); + encoded + } + pub fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + match call.input.get(0..4) { + Some(signature) => Self::METHOD_ID == signature, + None => false, + } + } + } + impl substreams_ethereum::Function for SetOwner { + const NAME: &'static str = "setOwner"; + fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + Self::match_call(call) + } + fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::decode(call) + } + fn encode(&self) -> Vec { + self.encode() + } + } + } + /// Contract's events. 
+ #[allow(dead_code, unused_imports, unused_variables)] + pub mod events { + use super::INTERNAL_ERR; + #[derive(Debug, Clone, PartialEq)] + pub struct FeeAmountEnabled { + pub fee: substreams::scalar::BigInt, + pub tick_spacing: substreams::scalar::BigInt, + } + impl FeeAmountEnabled { + const TOPIC_ID: [u8; 32] = [ + 198u8, + 106u8, + 63u8, + 223u8, + 7u8, + 35u8, + 44u8, + 221u8, + 24u8, + 95u8, + 235u8, + 204u8, + 101u8, + 121u8, + 212u8, + 8u8, + 194u8, + 65u8, + 180u8, + 122u8, + 226u8, + 249u8, + 144u8, + 125u8, + 132u8, + 190u8, + 101u8, + 81u8, + 65u8, + 238u8, + 174u8, + 204u8, + ]; + pub fn match_log(log: &substreams_ethereum::pb::eth::v2::Log) -> bool { + if log.topics.len() != 3usize { + return false; + } + if log.data.len() != 0usize { + return false; + } + return log.topics.get(0).expect("bounds already checked").as_ref() + == Self::TOPIC_ID; + } + pub fn decode( + log: &substreams_ethereum::pb::eth::v2::Log, + ) -> Result { + Ok(Self { + fee: { + let mut v = [0 as u8; 32]; + ethabi::decode( + &[ethabi::ParamType::Uint(24usize)], + log.topics[1usize].as_ref(), + ) + .map_err(|e| format!( + "unable to decode param 'fee' from topic of type 'uint24': {:?}", + e + ))? 
+ .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + tick_spacing: substreams::scalar::BigInt::from_signed_bytes_be( + log.topics[2usize].as_ref(), + ), + }) + } + } + impl substreams_ethereum::Event for FeeAmountEnabled { + const NAME: &'static str = "FeeAmountEnabled"; + fn match_log(log: &substreams_ethereum::pb::eth::v2::Log) -> bool { + Self::match_log(log) + } + fn decode( + log: &substreams_ethereum::pb::eth::v2::Log, + ) -> Result { + Self::decode(log) + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct OwnerChanged { + pub old_owner: Vec, + pub new_owner: Vec, + } + impl OwnerChanged { + const TOPIC_ID: [u8; 32] = [ + 181u8, + 50u8, + 7u8, + 59u8, + 56u8, + 200u8, + 49u8, + 69u8, + 227u8, + 229u8, + 19u8, + 83u8, + 119u8, + 160u8, + 139u8, + 249u8, + 170u8, + 181u8, + 91u8, + 192u8, + 253u8, + 124u8, + 17u8, + 121u8, + 205u8, + 79u8, + 185u8, + 149u8, + 210u8, + 165u8, + 21u8, + 156u8, + ]; + pub fn match_log(log: &substreams_ethereum::pb::eth::v2::Log) -> bool { + if log.topics.len() != 3usize { + return false; + } + if log.data.len() != 0usize { + return false; + } + return log.topics.get(0).expect("bounds already checked").as_ref() + == Self::TOPIC_ID; + } + pub fn decode( + log: &substreams_ethereum::pb::eth::v2::Log, + ) -> Result { + Ok(Self { + old_owner: ethabi::decode( + &[ethabi::ParamType::Address], + log.topics[1usize].as_ref(), + ) + .map_err(|e| format!( + "unable to decode param 'old_owner' from topic of type 'address': {:?}", + e + ))? + .pop() + .expect(INTERNAL_ERR) + .into_address() + .expect(INTERNAL_ERR) + .as_bytes() + .to_vec(), + new_owner: ethabi::decode( + &[ethabi::ParamType::Address], + log.topics[2usize].as_ref(), + ) + .map_err(|e| format!( + "unable to decode param 'new_owner' from topic of type 'address': {:?}", + e + ))? 
+ .pop() + .expect(INTERNAL_ERR) + .into_address() + .expect(INTERNAL_ERR) + .as_bytes() + .to_vec(), + }) + } + } + impl substreams_ethereum::Event for OwnerChanged { + const NAME: &'static str = "OwnerChanged"; + fn match_log(log: &substreams_ethereum::pb::eth::v2::Log) -> bool { + Self::match_log(log) + } + fn decode( + log: &substreams_ethereum::pb::eth::v2::Log, + ) -> Result { + Self::decode(log) + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct PoolCreated { + pub token0: Vec, + pub token1: Vec, + pub fee: substreams::scalar::BigInt, + pub tick_spacing: substreams::scalar::BigInt, + pub pool: Vec, + } + impl PoolCreated { + const TOPIC_ID: [u8; 32] = [ + 120u8, + 60u8, + 202u8, + 28u8, + 4u8, + 18u8, + 221u8, + 13u8, + 105u8, + 94u8, + 120u8, + 69u8, + 104u8, + 201u8, + 109u8, + 162u8, + 233u8, + 194u8, + 47u8, + 249u8, + 137u8, + 53u8, + 122u8, + 46u8, + 139u8, + 29u8, + 155u8, + 43u8, + 78u8, + 107u8, + 113u8, + 24u8, + ]; + pub fn match_log(log: &substreams_ethereum::pb::eth::v2::Log) -> bool { + if log.topics.len() != 4usize { + return false; + } + if log.data.len() != 64usize { + return false; + } + return log.topics.get(0).expect("bounds already checked").as_ref() + == Self::TOPIC_ID; + } + pub fn decode( + log: &substreams_ethereum::pb::eth::v2::Log, + ) -> Result { + let mut values = ethabi::decode( + &[ethabi::ParamType::Int(24usize), ethabi::ParamType::Address], + log.data.as_ref(), + ) + .map_err(|e| format!("unable to decode log.data: {:?}", e))?; + values.reverse(); + Ok(Self { + token0: ethabi::decode( + &[ethabi::ParamType::Address], + log.topics[1usize].as_ref(), + ) + .map_err(|e| format!( + "unable to decode param 'token0' from topic of type 'address': {:?}", + e + ))? 
+ .pop() + .expect(INTERNAL_ERR) + .into_address() + .expect(INTERNAL_ERR) + .as_bytes() + .to_vec(), + token1: ethabi::decode( + &[ethabi::ParamType::Address], + log.topics[2usize].as_ref(), + ) + .map_err(|e| format!( + "unable to decode param 'token1' from topic of type 'address': {:?}", + e + ))? + .pop() + .expect(INTERNAL_ERR) + .into_address() + .expect(INTERNAL_ERR) + .as_bytes() + .to_vec(), + fee: { + let mut v = [0 as u8; 32]; + ethabi::decode( + &[ethabi::ParamType::Uint(24usize)], + log.topics[3usize].as_ref(), + ) + .map_err(|e| format!( + "unable to decode param 'fee' from topic of type 'uint24': {:?}", + e + ))? + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + tick_spacing: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_int() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_signed_bytes_be(&v) + }, + pool: values + .pop() + .expect(INTERNAL_ERR) + .into_address() + .expect(INTERNAL_ERR) + .as_bytes() + .to_vec(), + }) + } + } + impl substreams_ethereum::Event for PoolCreated { + const NAME: &'static str = "PoolCreated"; + fn match_log(log: &substreams_ethereum::pb::eth::v2::Log) -> bool { + Self::match_log(log) + } + fn decode( + log: &substreams_ethereum::pb::eth::v2::Log, + ) -> Result { + Self::decode(log) + } + } + } \ No newline at end of file diff --git a/transforms/uniswap/src/abi/mod.rs b/transforms/uniswap/src/abi/mod.rs new file mode 100644 index 00000000000..851cfb329b5 --- /dev/null +++ b/transforms/uniswap/src/abi/mod.rs @@ -0,0 +1,17 @@ +#[rustfmt::skip] +#[allow(unused_imports)] +#[path = "erc20.rs"] +pub mod erc20; +#[rustfmt::skip] +#[allow(unused_imports)] +#[path = "factory.rs"] +pub mod factory; +#[rustfmt::skip] +#[allow(unused_imports)] +#[path = "pool.rs"] +pub mod pool; + +#[rustfmt::skip] +#[allow(unused_imports)] +#[path = 
"positionmanager.rs"] +pub mod positionmanager; diff --git a/transforms/uniswap/src/abi/pool.rs b/transforms/uniswap/src/abi/pool.rs new file mode 100644 index 00000000000..cac6e4f9c76 --- /dev/null +++ b/transforms/uniswap/src/abi/pool.rs @@ -0,0 +1,5135 @@ + const INTERNAL_ERR: &'static str = "`ethabi_derive` internal error"; + /// Contract's functions. + #[allow(dead_code, unused_imports, unused_variables)] + pub mod functions { + use super::INTERNAL_ERR; + #[derive(Debug, Clone, PartialEq)] + pub struct Burn { + pub tick_lower: substreams::scalar::BigInt, + pub tick_upper: substreams::scalar::BigInt, + pub amount: substreams::scalar::BigInt, + } + impl Burn { + const METHOD_ID: [u8; 4] = [163u8, 65u8, 35u8, 167u8]; + pub fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + let maybe_data = call.input.get(4..); + if maybe_data.is_none() { + return Err("no data to decode".to_string()); + } + let mut values = ethabi::decode( + &[ + ethabi::ParamType::Int(24usize), + ethabi::ParamType::Int(24usize), + ethabi::ParamType::Uint(128usize), + ], + maybe_data.unwrap(), + ) + .map_err(|e| format!("unable to decode call.input: {:?}", e))?; + values.reverse(); + Ok(Self { + tick_lower: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_int() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_signed_bytes_be(&v) + }, + tick_upper: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_int() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_signed_bytes_be(&v) + }, + amount: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + }) + } + pub fn encode(&self) -> Vec { + let data = ethabi::encode( + &[ + { + let non_full_signed_bytes = self + .tick_lower + 
.to_signed_bytes_be(); + let full_signed_bytes_init = if non_full_signed_bytes[0] + & 0x80 == 0x80 + { + 0xff + } else { + 0x00 + }; + let mut full_signed_bytes = [full_signed_bytes_init + as u8; 32]; + non_full_signed_bytes + .into_iter() + .rev() + .enumerate() + .for_each(|(i, byte)| full_signed_bytes[31 - i] = byte); + ethabi::Token::Int( + ethabi::Int::from_big_endian(full_signed_bytes.as_ref()), + ) + }, + { + let non_full_signed_bytes = self + .tick_upper + .to_signed_bytes_be(); + let full_signed_bytes_init = if non_full_signed_bytes[0] + & 0x80 == 0x80 + { + 0xff + } else { + 0x00 + }; + let mut full_signed_bytes = [full_signed_bytes_init + as u8; 32]; + non_full_signed_bytes + .into_iter() + .rev() + .enumerate() + .for_each(|(i, byte)| full_signed_bytes[31 - i] = byte); + ethabi::Token::Int( + ethabi::Int::from_big_endian(full_signed_bytes.as_ref()), + ) + }, + ethabi::Token::Uint( + ethabi::Uint::from_big_endian( + match self.amount.clone().to_bytes_be() { + (num_bigint::Sign::Plus, bytes) => bytes, + (num_bigint::Sign::NoSign, bytes) => bytes, + (num_bigint::Sign::Minus, _) => { + panic!("negative numbers are not supported") + } + } + .as_slice(), + ), + ), + ], + ); + let mut encoded = Vec::with_capacity(4 + data.len()); + encoded.extend(Self::METHOD_ID); + encoded.extend(data); + encoded + } + pub fn output_call( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result< + (substreams::scalar::BigInt, substreams::scalar::BigInt), + String, + > { + Self::output(call.return_data.as_ref()) + } + pub fn output( + data: &[u8], + ) -> Result< + (substreams::scalar::BigInt, substreams::scalar::BigInt), + String, + > { + let mut values = ethabi::decode( + &[ + ethabi::ParamType::Uint(256usize), + ethabi::ParamType::Uint(256usize), + ], + data.as_ref(), + ) + .map_err(|e| format!("unable to decode output data: {:?}", e))?; + values.reverse(); + Ok(( + { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + 
.expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + )) + } + pub fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + match call.input.get(0..4) { + Some(signature) => Self::METHOD_ID == signature, + None => false, + } + } + pub fn call( + &self, + address: Vec, + ) -> Option<(substreams::scalar::BigInt, substreams::scalar::BigInt)> { + use substreams_ethereum::pb::eth::rpc; + let rpc_calls = rpc::RpcCalls { + calls: vec![ + rpc::RpcCall { to_addr : address, data : self.encode(), } + ], + }; + let responses = substreams_ethereum::rpc::eth_call(&rpc_calls).responses; + let response = responses + .get(0) + .expect("one response should have existed"); + if response.failed { + return None; + } + match Self::output(response.raw.as_ref()) { + Ok(data) => Some(data), + Err(err) => { + use substreams_ethereum::Function; + substreams::log::info!( + "Call output for function `{}` failed to decode with error: {}", + Self::NAME, err + ); + None + } + } + } + } + impl substreams_ethereum::Function for Burn { + const NAME: &'static str = "burn"; + fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + Self::match_call(call) + } + fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::decode(call) + } + fn encode(&self) -> Vec { + self.encode() + } + } + impl substreams_ethereum::rpc::RPCDecodable< + (substreams::scalar::BigInt, substreams::scalar::BigInt), + > for Burn { + fn output( + data: &[u8], + ) -> Result< + (substreams::scalar::BigInt, substreams::scalar::BigInt), + String, + > { + Self::output(data) + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct Collect { + pub recipient: Vec, + pub tick_lower: 
substreams::scalar::BigInt, + pub tick_upper: substreams::scalar::BigInt, + pub amount0_requested: substreams::scalar::BigInt, + pub amount1_requested: substreams::scalar::BigInt, + } + impl Collect { + const METHOD_ID: [u8; 4] = [79u8, 30u8, 179u8, 216u8]; + pub fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + let maybe_data = call.input.get(4..); + if maybe_data.is_none() { + return Err("no data to decode".to_string()); + } + let mut values = ethabi::decode( + &[ + ethabi::ParamType::Address, + ethabi::ParamType::Int(24usize), + ethabi::ParamType::Int(24usize), + ethabi::ParamType::Uint(128usize), + ethabi::ParamType::Uint(128usize), + ], + maybe_data.unwrap(), + ) + .map_err(|e| format!("unable to decode call.input: {:?}", e))?; + values.reverse(); + Ok(Self { + recipient: values + .pop() + .expect(INTERNAL_ERR) + .into_address() + .expect(INTERNAL_ERR) + .as_bytes() + .to_vec(), + tick_lower: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_int() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_signed_bytes_be(&v) + }, + tick_upper: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_int() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_signed_bytes_be(&v) + }, + amount0_requested: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + amount1_requested: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + }) + } + pub fn encode(&self) -> Vec { + let data = ethabi::encode( + &[ + ethabi::Token::Address( + ethabi::Address::from_slice(&self.recipient), + ), + { + let 
non_full_signed_bytes = self + .tick_lower + .to_signed_bytes_be(); + let full_signed_bytes_init = if non_full_signed_bytes[0] + & 0x80 == 0x80 + { + 0xff + } else { + 0x00 + }; + let mut full_signed_bytes = [full_signed_bytes_init + as u8; 32]; + non_full_signed_bytes + .into_iter() + .rev() + .enumerate() + .for_each(|(i, byte)| full_signed_bytes[31 - i] = byte); + ethabi::Token::Int( + ethabi::Int::from_big_endian(full_signed_bytes.as_ref()), + ) + }, + { + let non_full_signed_bytes = self + .tick_upper + .to_signed_bytes_be(); + let full_signed_bytes_init = if non_full_signed_bytes[0] + & 0x80 == 0x80 + { + 0xff + } else { + 0x00 + }; + let mut full_signed_bytes = [full_signed_bytes_init + as u8; 32]; + non_full_signed_bytes + .into_iter() + .rev() + .enumerate() + .for_each(|(i, byte)| full_signed_bytes[31 - i] = byte); + ethabi::Token::Int( + ethabi::Int::from_big_endian(full_signed_bytes.as_ref()), + ) + }, + ethabi::Token::Uint( + ethabi::Uint::from_big_endian( + match self.amount0_requested.clone().to_bytes_be() { + (num_bigint::Sign::Plus, bytes) => bytes, + (num_bigint::Sign::NoSign, bytes) => bytes, + (num_bigint::Sign::Minus, _) => { + panic!("negative numbers are not supported") + } + } + .as_slice(), + ), + ), + ethabi::Token::Uint( + ethabi::Uint::from_big_endian( + match self.amount1_requested.clone().to_bytes_be() { + (num_bigint::Sign::Plus, bytes) => bytes, + (num_bigint::Sign::NoSign, bytes) => bytes, + (num_bigint::Sign::Minus, _) => { + panic!("negative numbers are not supported") + } + } + .as_slice(), + ), + ), + ], + ); + let mut encoded = Vec::with_capacity(4 + data.len()); + encoded.extend(Self::METHOD_ID); + encoded.extend(data); + encoded + } + pub fn output_call( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result< + (substreams::scalar::BigInt, substreams::scalar::BigInt), + String, + > { + Self::output(call.return_data.as_ref()) + } + pub fn output( + data: &[u8], + ) -> Result< + (substreams::scalar::BigInt, 
substreams::scalar::BigInt), + String, + > { + let mut values = ethabi::decode( + &[ + ethabi::ParamType::Uint(128usize), + ethabi::ParamType::Uint(128usize), + ], + data.as_ref(), + ) + .map_err(|e| format!("unable to decode output data: {:?}", e))?; + values.reverse(); + Ok(( + { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + )) + } + pub fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + match call.input.get(0..4) { + Some(signature) => Self::METHOD_ID == signature, + None => false, + } + } + pub fn call( + &self, + address: Vec, + ) -> Option<(substreams::scalar::BigInt, substreams::scalar::BigInt)> { + use substreams_ethereum::pb::eth::rpc; + let rpc_calls = rpc::RpcCalls { + calls: vec![ + rpc::RpcCall { to_addr : address, data : self.encode(), } + ], + }; + let responses = substreams_ethereum::rpc::eth_call(&rpc_calls).responses; + let response = responses + .get(0) + .expect("one response should have existed"); + if response.failed { + return None; + } + match Self::output(response.raw.as_ref()) { + Ok(data) => Some(data), + Err(err) => { + use substreams_ethereum::Function; + substreams::log::info!( + "Call output for function `{}` failed to decode with error: {}", + Self::NAME, err + ); + None + } + } + } + } + impl substreams_ethereum::Function for Collect { + const NAME: &'static str = "collect"; + fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + Self::match_call(call) + } + fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::decode(call) + } + fn encode(&self) -> Vec { + self.encode() + } + } + impl 
substreams_ethereum::rpc::RPCDecodable< + (substreams::scalar::BigInt, substreams::scalar::BigInt), + > for Collect { + fn output( + data: &[u8], + ) -> Result< + (substreams::scalar::BigInt, substreams::scalar::BigInt), + String, + > { + Self::output(data) + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct CollectProtocol { + pub recipient: Vec, + pub amount0_requested: substreams::scalar::BigInt, + pub amount1_requested: substreams::scalar::BigInt, + } + impl CollectProtocol { + const METHOD_ID: [u8; 4] = [133u8, 182u8, 103u8, 41u8]; + pub fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + let maybe_data = call.input.get(4..); + if maybe_data.is_none() { + return Err("no data to decode".to_string()); + } + let mut values = ethabi::decode( + &[ + ethabi::ParamType::Address, + ethabi::ParamType::Uint(128usize), + ethabi::ParamType::Uint(128usize), + ], + maybe_data.unwrap(), + ) + .map_err(|e| format!("unable to decode call.input: {:?}", e))?; + values.reverse(); + Ok(Self { + recipient: values + .pop() + .expect(INTERNAL_ERR) + .into_address() + .expect(INTERNAL_ERR) + .as_bytes() + .to_vec(), + amount0_requested: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + amount1_requested: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + }) + } + pub fn encode(&self) -> Vec { + let data = ethabi::encode( + &[ + ethabi::Token::Address( + ethabi::Address::from_slice(&self.recipient), + ), + ethabi::Token::Uint( + ethabi::Uint::from_big_endian( + match self.amount0_requested.clone().to_bytes_be() { + (num_bigint::Sign::Plus, bytes) => bytes, + (num_bigint::Sign::NoSign, bytes) => bytes, + (num_bigint::Sign::Minus, _) => { 
+ panic!("negative numbers are not supported") + } + } + .as_slice(), + ), + ), + ethabi::Token::Uint( + ethabi::Uint::from_big_endian( + match self.amount1_requested.clone().to_bytes_be() { + (num_bigint::Sign::Plus, bytes) => bytes, + (num_bigint::Sign::NoSign, bytes) => bytes, + (num_bigint::Sign::Minus, _) => { + panic!("negative numbers are not supported") + } + } + .as_slice(), + ), + ), + ], + ); + let mut encoded = Vec::with_capacity(4 + data.len()); + encoded.extend(Self::METHOD_ID); + encoded.extend(data); + encoded + } + pub fn output_call( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result< + (substreams::scalar::BigInt, substreams::scalar::BigInt), + String, + > { + Self::output(call.return_data.as_ref()) + } + pub fn output( + data: &[u8], + ) -> Result< + (substreams::scalar::BigInt, substreams::scalar::BigInt), + String, + > { + let mut values = ethabi::decode( + &[ + ethabi::ParamType::Uint(128usize), + ethabi::ParamType::Uint(128usize), + ], + data.as_ref(), + ) + .map_err(|e| format!("unable to decode output data: {:?}", e))?; + values.reverse(); + Ok(( + { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + )) + } + pub fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + match call.input.get(0..4) { + Some(signature) => Self::METHOD_ID == signature, + None => false, + } + } + pub fn call( + &self, + address: Vec, + ) -> Option<(substreams::scalar::BigInt, substreams::scalar::BigInt)> { + use substreams_ethereum::pb::eth::rpc; + let rpc_calls = rpc::RpcCalls { + calls: vec![ + rpc::RpcCall { to_addr : address, data : self.encode(), } + ], + }; + let 
responses = substreams_ethereum::rpc::eth_call(&rpc_calls).responses; + let response = responses + .get(0) + .expect("one response should have existed"); + if response.failed { + return None; + } + match Self::output(response.raw.as_ref()) { + Ok(data) => Some(data), + Err(err) => { + use substreams_ethereum::Function; + substreams::log::info!( + "Call output for function `{}` failed to decode with error: {}", + Self::NAME, err + ); + None + } + } + } + } + impl substreams_ethereum::Function for CollectProtocol { + const NAME: &'static str = "collectProtocol"; + fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + Self::match_call(call) + } + fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::decode(call) + } + fn encode(&self) -> Vec { + self.encode() + } + } + impl substreams_ethereum::rpc::RPCDecodable< + (substreams::scalar::BigInt, substreams::scalar::BigInt), + > for CollectProtocol { + fn output( + data: &[u8], + ) -> Result< + (substreams::scalar::BigInt, substreams::scalar::BigInt), + String, + > { + Self::output(data) + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct Factory {} + impl Factory { + const METHOD_ID: [u8; 4] = [196u8, 90u8, 1u8, 85u8]; + pub fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Ok(Self {}) + } + pub fn encode(&self) -> Vec { + let data = ethabi::encode(&[]); + let mut encoded = Vec::with_capacity(4 + data.len()); + encoded.extend(Self::METHOD_ID); + encoded.extend(data); + encoded + } + pub fn output_call( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result, String> { + Self::output(call.return_data.as_ref()) + } + pub fn output(data: &[u8]) -> Result, String> { + let mut values = ethabi::decode( + &[ethabi::ParamType::Address], + data.as_ref(), + ) + .map_err(|e| format!("unable to decode output data: {:?}", e))?; + Ok( + values + .pop() + .expect("one output data should have existed") + .into_address() + .expect(INTERNAL_ERR) + 
.as_bytes() + .to_vec(), + ) + } + pub fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + match call.input.get(0..4) { + Some(signature) => Self::METHOD_ID == signature, + None => false, + } + } + pub fn call(&self, address: Vec) -> Option> { + use substreams_ethereum::pb::eth::rpc; + let rpc_calls = rpc::RpcCalls { + calls: vec![ + rpc::RpcCall { to_addr : address, data : self.encode(), } + ], + }; + let responses = substreams_ethereum::rpc::eth_call(&rpc_calls).responses; + let response = responses + .get(0) + .expect("one response should have existed"); + if response.failed { + return None; + } + match Self::output(response.raw.as_ref()) { + Ok(data) => Some(data), + Err(err) => { + use substreams_ethereum::Function; + substreams::log::info!( + "Call output for function `{}` failed to decode with error: {}", + Self::NAME, err + ); + None + } + } + } + } + impl substreams_ethereum::Function for Factory { + const NAME: &'static str = "factory"; + fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + Self::match_call(call) + } + fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::decode(call) + } + fn encode(&self) -> Vec { + self.encode() + } + } + impl substreams_ethereum::rpc::RPCDecodable> for Factory { + fn output(data: &[u8]) -> Result, String> { + Self::output(data) + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct Fee {} + impl Fee { + const METHOD_ID: [u8; 4] = [221u8, 202u8, 63u8, 67u8]; + pub fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Ok(Self {}) + } + pub fn encode(&self) -> Vec { + let data = ethabi::encode(&[]); + let mut encoded = Vec::with_capacity(4 + data.len()); + encoded.extend(Self::METHOD_ID); + encoded.extend(data); + encoded + } + pub fn output_call( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::output(call.return_data.as_ref()) + } + pub fn output(data: &[u8]) -> Result { + let mut values = 
ethabi::decode( + &[ethabi::ParamType::Uint(24usize)], + data.as_ref(), + ) + .map_err(|e| format!("unable to decode output data: {:?}", e))?; + Ok({ + let mut v = [0 as u8; 32]; + values + .pop() + .expect("one output data should have existed") + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }) + } + pub fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + match call.input.get(0..4) { + Some(signature) => Self::METHOD_ID == signature, + None => false, + } + } + pub fn call(&self, address: Vec) -> Option { + use substreams_ethereum::pb::eth::rpc; + let rpc_calls = rpc::RpcCalls { + calls: vec![ + rpc::RpcCall { to_addr : address, data : self.encode(), } + ], + }; + let responses = substreams_ethereum::rpc::eth_call(&rpc_calls).responses; + let response = responses + .get(0) + .expect("one response should have existed"); + if response.failed { + return None; + } + match Self::output(response.raw.as_ref()) { + Ok(data) => Some(data), + Err(err) => { + use substreams_ethereum::Function; + substreams::log::info!( + "Call output for function `{}` failed to decode with error: {}", + Self::NAME, err + ); + None + } + } + } + } + impl substreams_ethereum::Function for Fee { + const NAME: &'static str = "fee"; + fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + Self::match_call(call) + } + fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::decode(call) + } + fn encode(&self) -> Vec { + self.encode() + } + } + impl substreams_ethereum::rpc::RPCDecodable for Fee { + fn output(data: &[u8]) -> Result { + Self::output(data) + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct FeeGrowthGlobal0X128 {} + impl FeeGrowthGlobal0X128 { + const METHOD_ID: [u8; 4] = [243u8, 5u8, 131u8, 153u8]; + pub fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Ok(Self {}) + } + pub fn encode(&self) -> Vec { 
+ let data = ethabi::encode(&[]); + let mut encoded = Vec::with_capacity(4 + data.len()); + encoded.extend(Self::METHOD_ID); + encoded.extend(data); + encoded + } + pub fn output_call( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::output(call.return_data.as_ref()) + } + pub fn output(data: &[u8]) -> Result { + let mut values = ethabi::decode( + &[ethabi::ParamType::Uint(256usize)], + data.as_ref(), + ) + .map_err(|e| format!("unable to decode output data: {:?}", e))?; + Ok({ + let mut v = [0 as u8; 32]; + values + .pop() + .expect("one output data should have existed") + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }) + } + pub fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + match call.input.get(0..4) { + Some(signature) => Self::METHOD_ID == signature, + None => false, + } + } + pub fn call(&self, address: Vec) -> Option { + use substreams_ethereum::pb::eth::rpc; + let rpc_calls = rpc::RpcCalls { + calls: vec![ + rpc::RpcCall { to_addr : address, data : self.encode(), } + ], + }; + let responses = substreams_ethereum::rpc::eth_call(&rpc_calls).responses; + let response = responses + .get(0) + .expect("one response should have existed"); + if response.failed { + return None; + } + match Self::output(response.raw.as_ref()) { + Ok(data) => Some(data), + Err(err) => { + use substreams_ethereum::Function; + substreams::log::info!( + "Call output for function `{}` failed to decode with error: {}", + Self::NAME, err + ); + None + } + } + } + } + impl substreams_ethereum::Function for FeeGrowthGlobal0X128 { + const NAME: &'static str = "feeGrowthGlobal0X128"; + fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + Self::match_call(call) + } + fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::decode(call) + } + fn encode(&self) -> Vec { + self.encode() + } + } + impl 
substreams_ethereum::rpc::RPCDecodable + for FeeGrowthGlobal0X128 { + fn output(data: &[u8]) -> Result { + Self::output(data) + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct FeeGrowthGlobal1X128 {} + impl FeeGrowthGlobal1X128 { + const METHOD_ID: [u8; 4] = [70u8, 20u8, 19u8, 25u8]; + pub fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Ok(Self {}) + } + pub fn encode(&self) -> Vec { + let data = ethabi::encode(&[]); + let mut encoded = Vec::with_capacity(4 + data.len()); + encoded.extend(Self::METHOD_ID); + encoded.extend(data); + encoded + } + pub fn output_call( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::output(call.return_data.as_ref()) + } + pub fn output(data: &[u8]) -> Result { + let mut values = ethabi::decode( + &[ethabi::ParamType::Uint(256usize)], + data.as_ref(), + ) + .map_err(|e| format!("unable to decode output data: {:?}", e))?; + Ok({ + let mut v = [0 as u8; 32]; + values + .pop() + .expect("one output data should have existed") + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }) + } + pub fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + match call.input.get(0..4) { + Some(signature) => Self::METHOD_ID == signature, + None => false, + } + } + pub fn call(&self, address: Vec) -> Option { + use substreams_ethereum::pb::eth::rpc; + let rpc_calls = rpc::RpcCalls { + calls: vec![ + rpc::RpcCall { to_addr : address, data : self.encode(), } + ], + }; + let responses = substreams_ethereum::rpc::eth_call(&rpc_calls).responses; + let response = responses + .get(0) + .expect("one response should have existed"); + if response.failed { + return None; + } + match Self::output(response.raw.as_ref()) { + Ok(data) => Some(data), + Err(err) => { + use substreams_ethereum::Function; + substreams::log::info!( + "Call output for function `{}` failed to decode with error: {}", + Self::NAME, err + 
); + None + } + } + } + } + impl substreams_ethereum::Function for FeeGrowthGlobal1X128 { + const NAME: &'static str = "feeGrowthGlobal1X128"; + fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + Self::match_call(call) + } + fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::decode(call) + } + fn encode(&self) -> Vec { + self.encode() + } + } + impl substreams_ethereum::rpc::RPCDecodable + for FeeGrowthGlobal1X128 { + fn output(data: &[u8]) -> Result { + Self::output(data) + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct Flash { + pub recipient: Vec, + pub amount0: substreams::scalar::BigInt, + pub amount1: substreams::scalar::BigInt, + pub data: Vec, + } + impl Flash { + const METHOD_ID: [u8; 4] = [73u8, 14u8, 108u8, 188u8]; + pub fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + let maybe_data = call.input.get(4..); + if maybe_data.is_none() { + return Err("no data to decode".to_string()); + } + let mut values = ethabi::decode( + &[ + ethabi::ParamType::Address, + ethabi::ParamType::Uint(256usize), + ethabi::ParamType::Uint(256usize), + ethabi::ParamType::Bytes, + ], + maybe_data.unwrap(), + ) + .map_err(|e| format!("unable to decode call.input: {:?}", e))?; + values.reverse(); + Ok(Self { + recipient: values + .pop() + .expect(INTERNAL_ERR) + .into_address() + .expect(INTERNAL_ERR) + .as_bytes() + .to_vec(), + amount0: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + amount1: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + data: values + .pop() + .expect(INTERNAL_ERR) + .into_bytes() + .expect(INTERNAL_ERR), + }) + } + pub fn encode(&self) -> Vec { + 
let data = ethabi::encode( + &[ + ethabi::Token::Address( + ethabi::Address::from_slice(&self.recipient), + ), + ethabi::Token::Uint( + ethabi::Uint::from_big_endian( + match self.amount0.clone().to_bytes_be() { + (num_bigint::Sign::Plus, bytes) => bytes, + (num_bigint::Sign::NoSign, bytes) => bytes, + (num_bigint::Sign::Minus, _) => { + panic!("negative numbers are not supported") + } + } + .as_slice(), + ), + ), + ethabi::Token::Uint( + ethabi::Uint::from_big_endian( + match self.amount1.clone().to_bytes_be() { + (num_bigint::Sign::Plus, bytes) => bytes, + (num_bigint::Sign::NoSign, bytes) => bytes, + (num_bigint::Sign::Minus, _) => { + panic!("negative numbers are not supported") + } + } + .as_slice(), + ), + ), + ethabi::Token::Bytes(self.data.clone()), + ], + ); + let mut encoded = Vec::with_capacity(4 + data.len()); + encoded.extend(Self::METHOD_ID); + encoded.extend(data); + encoded + } + pub fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + match call.input.get(0..4) { + Some(signature) => Self::METHOD_ID == signature, + None => false, + } + } + } + impl substreams_ethereum::Function for Flash { + const NAME: &'static str = "flash"; + fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + Self::match_call(call) + } + fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::decode(call) + } + fn encode(&self) -> Vec { + self.encode() + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct IncreaseObservationCardinalityNext { + pub observation_cardinality_next: substreams::scalar::BigInt, + } + impl IncreaseObservationCardinalityNext { + const METHOD_ID: [u8; 4] = [50u8, 20u8, 143u8, 103u8]; + pub fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + let maybe_data = call.input.get(4..); + if maybe_data.is_none() { + return Err("no data to decode".to_string()); + } + let mut values = ethabi::decode( + &[ethabi::ParamType::Uint(16usize)], + maybe_data.unwrap(), + ) + 
.map_err(|e| format!("unable to decode call.input: {:?}", e))?; + values.reverse(); + Ok(Self { + observation_cardinality_next: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + }) + } + pub fn encode(&self) -> Vec { + let data = ethabi::encode( + &[ + ethabi::Token::Uint( + ethabi::Uint::from_big_endian( + match self + .observation_cardinality_next + .clone() + .to_bytes_be() + { + (num_bigint::Sign::Plus, bytes) => bytes, + (num_bigint::Sign::NoSign, bytes) => bytes, + (num_bigint::Sign::Minus, _) => { + panic!("negative numbers are not supported") + } + } + .as_slice(), + ), + ), + ], + ); + let mut encoded = Vec::with_capacity(4 + data.len()); + encoded.extend(Self::METHOD_ID); + encoded.extend(data); + encoded + } + pub fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + match call.input.get(0..4) { + Some(signature) => Self::METHOD_ID == signature, + None => false, + } + } + } + impl substreams_ethereum::Function for IncreaseObservationCardinalityNext { + const NAME: &'static str = "increaseObservationCardinalityNext"; + fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + Self::match_call(call) + } + fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::decode(call) + } + fn encode(&self) -> Vec { + self.encode() + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct Initialize { + pub sqrt_price_x96: substreams::scalar::BigInt, + } + impl Initialize { + const METHOD_ID: [u8; 4] = [246u8, 55u8, 115u8, 29u8]; + pub fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + let maybe_data = call.input.get(4..); + if maybe_data.is_none() { + return Err("no data to decode".to_string()); + } + let mut values = ethabi::decode( + &[ethabi::ParamType::Uint(160usize)], + maybe_data.unwrap(), + ) + .map_err(|e| 
format!("unable to decode call.input: {:?}", e))?; + values.reverse(); + Ok(Self { + sqrt_price_x96: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + }) + } + pub fn encode(&self) -> Vec { + let data = ethabi::encode( + &[ + ethabi::Token::Uint( + ethabi::Uint::from_big_endian( + match self.sqrt_price_x96.clone().to_bytes_be() { + (num_bigint::Sign::Plus, bytes) => bytes, + (num_bigint::Sign::NoSign, bytes) => bytes, + (num_bigint::Sign::Minus, _) => { + panic!("negative numbers are not supported") + } + } + .as_slice(), + ), + ), + ], + ); + let mut encoded = Vec::with_capacity(4 + data.len()); + encoded.extend(Self::METHOD_ID); + encoded.extend(data); + encoded + } + pub fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + match call.input.get(0..4) { + Some(signature) => Self::METHOD_ID == signature, + None => false, + } + } + } + impl substreams_ethereum::Function for Initialize { + const NAME: &'static str = "initialize"; + fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + Self::match_call(call) + } + fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::decode(call) + } + fn encode(&self) -> Vec { + self.encode() + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct Liquidity {} + impl Liquidity { + const METHOD_ID: [u8; 4] = [26u8, 104u8, 101u8, 2u8]; + pub fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Ok(Self {}) + } + pub fn encode(&self) -> Vec { + let data = ethabi::encode(&[]); + let mut encoded = Vec::with_capacity(4 + data.len()); + encoded.extend(Self::METHOD_ID); + encoded.extend(data); + encoded + } + pub fn output_call( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::output(call.return_data.as_ref()) + } + pub fn output(data: &[u8]) -> Result { + let mut 
values = ethabi::decode( + &[ethabi::ParamType::Uint(128usize)], + data.as_ref(), + ) + .map_err(|e| format!("unable to decode output data: {:?}", e))?; + Ok({ + let mut v = [0 as u8; 32]; + values + .pop() + .expect("one output data should have existed") + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }) + } + pub fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + match call.input.get(0..4) { + Some(signature) => Self::METHOD_ID == signature, + None => false, + } + } + pub fn call(&self, address: Vec) -> Option { + use substreams_ethereum::pb::eth::rpc; + let rpc_calls = rpc::RpcCalls { + calls: vec![ + rpc::RpcCall { to_addr : address, data : self.encode(), } + ], + }; + let responses = substreams_ethereum::rpc::eth_call(&rpc_calls).responses; + let response = responses + .get(0) + .expect("one response should have existed"); + if response.failed { + return None; + } + match Self::output(response.raw.as_ref()) { + Ok(data) => Some(data), + Err(err) => { + use substreams_ethereum::Function; + substreams::log::info!( + "Call output for function `{}` failed to decode with error: {}", + Self::NAME, err + ); + None + } + } + } + } + impl substreams_ethereum::Function for Liquidity { + const NAME: &'static str = "liquidity"; + fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + Self::match_call(call) + } + fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::decode(call) + } + fn encode(&self) -> Vec { + self.encode() + } + } + impl substreams_ethereum::rpc::RPCDecodable + for Liquidity { + fn output(data: &[u8]) -> Result { + Self::output(data) + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct MaxLiquidityPerTick {} + impl MaxLiquidityPerTick { + const METHOD_ID: [u8; 4] = [112u8, 207u8, 117u8, 74u8]; + pub fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Ok(Self {}) + } + 
pub fn encode(&self) -> Vec { + let data = ethabi::encode(&[]); + let mut encoded = Vec::with_capacity(4 + data.len()); + encoded.extend(Self::METHOD_ID); + encoded.extend(data); + encoded + } + pub fn output_call( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::output(call.return_data.as_ref()) + } + pub fn output(data: &[u8]) -> Result { + let mut values = ethabi::decode( + &[ethabi::ParamType::Uint(128usize)], + data.as_ref(), + ) + .map_err(|e| format!("unable to decode output data: {:?}", e))?; + Ok({ + let mut v = [0 as u8; 32]; + values + .pop() + .expect("one output data should have existed") + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }) + } + pub fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + match call.input.get(0..4) { + Some(signature) => Self::METHOD_ID == signature, + None => false, + } + } + pub fn call(&self, address: Vec) -> Option { + use substreams_ethereum::pb::eth::rpc; + let rpc_calls = rpc::RpcCalls { + calls: vec![ + rpc::RpcCall { to_addr : address, data : self.encode(), } + ], + }; + let responses = substreams_ethereum::rpc::eth_call(&rpc_calls).responses; + let response = responses + .get(0) + .expect("one response should have existed"); + if response.failed { + return None; + } + match Self::output(response.raw.as_ref()) { + Ok(data) => Some(data), + Err(err) => { + use substreams_ethereum::Function; + substreams::log::info!( + "Call output for function `{}` failed to decode with error: {}", + Self::NAME, err + ); + None + } + } + } + } + impl substreams_ethereum::Function for MaxLiquidityPerTick { + const NAME: &'static str = "maxLiquidityPerTick"; + fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + Self::match_call(call) + } + fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::decode(call) + } + fn encode(&self) -> Vec { + self.encode() + } 
+ } + impl substreams_ethereum::rpc::RPCDecodable + for MaxLiquidityPerTick { + fn output(data: &[u8]) -> Result { + Self::output(data) + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct Mint { + pub recipient: Vec, + pub tick_lower: substreams::scalar::BigInt, + pub tick_upper: substreams::scalar::BigInt, + pub amount: substreams::scalar::BigInt, + pub data: Vec, + } + impl Mint { + const METHOD_ID: [u8; 4] = [60u8, 138u8, 125u8, 141u8]; + pub fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + let maybe_data = call.input.get(4..); + if maybe_data.is_none() { + return Err("no data to decode".to_string()); + } + let mut values = ethabi::decode( + &[ + ethabi::ParamType::Address, + ethabi::ParamType::Int(24usize), + ethabi::ParamType::Int(24usize), + ethabi::ParamType::Uint(128usize), + ethabi::ParamType::Bytes, + ], + maybe_data.unwrap(), + ) + .map_err(|e| format!("unable to decode call.input: {:?}", e))?; + values.reverse(); + Ok(Self { + recipient: values + .pop() + .expect(INTERNAL_ERR) + .into_address() + .expect(INTERNAL_ERR) + .as_bytes() + .to_vec(), + tick_lower: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_int() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_signed_bytes_be(&v) + }, + tick_upper: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_int() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_signed_bytes_be(&v) + }, + amount: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + data: values + .pop() + .expect(INTERNAL_ERR) + .into_bytes() + .expect(INTERNAL_ERR), + }) + } + pub fn encode(&self) -> Vec { + let data = ethabi::encode( + &[ + ethabi::Token::Address( + 
ethabi::Address::from_slice(&self.recipient), + ), + { + let non_full_signed_bytes = self + .tick_lower + .to_signed_bytes_be(); + let full_signed_bytes_init = if non_full_signed_bytes[0] + & 0x80 == 0x80 + { + 0xff + } else { + 0x00 + }; + let mut full_signed_bytes = [full_signed_bytes_init + as u8; 32]; + non_full_signed_bytes + .into_iter() + .rev() + .enumerate() + .for_each(|(i, byte)| full_signed_bytes[31 - i] = byte); + ethabi::Token::Int( + ethabi::Int::from_big_endian(full_signed_bytes.as_ref()), + ) + }, + { + let non_full_signed_bytes = self + .tick_upper + .to_signed_bytes_be(); + let full_signed_bytes_init = if non_full_signed_bytes[0] + & 0x80 == 0x80 + { + 0xff + } else { + 0x00 + }; + let mut full_signed_bytes = [full_signed_bytes_init + as u8; 32]; + non_full_signed_bytes + .into_iter() + .rev() + .enumerate() + .for_each(|(i, byte)| full_signed_bytes[31 - i] = byte); + ethabi::Token::Int( + ethabi::Int::from_big_endian(full_signed_bytes.as_ref()), + ) + }, + ethabi::Token::Uint( + ethabi::Uint::from_big_endian( + match self.amount.clone().to_bytes_be() { + (num_bigint::Sign::Plus, bytes) => bytes, + (num_bigint::Sign::NoSign, bytes) => bytes, + (num_bigint::Sign::Minus, _) => { + panic!("negative numbers are not supported") + } + } + .as_slice(), + ), + ), + ethabi::Token::Bytes(self.data.clone()), + ], + ); + let mut encoded = Vec::with_capacity(4 + data.len()); + encoded.extend(Self::METHOD_ID); + encoded.extend(data); + encoded + } + pub fn output_call( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result< + (substreams::scalar::BigInt, substreams::scalar::BigInt), + String, + > { + Self::output(call.return_data.as_ref()) + } + pub fn output( + data: &[u8], + ) -> Result< + (substreams::scalar::BigInt, substreams::scalar::BigInt), + String, + > { + let mut values = ethabi::decode( + &[ + ethabi::ParamType::Uint(256usize), + ethabi::ParamType::Uint(256usize), + ], + data.as_ref(), + ) + .map_err(|e| format!("unable to decode output 
data: {:?}", e))?; + values.reverse(); + Ok(( + { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + )) + } + pub fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + match call.input.get(0..4) { + Some(signature) => Self::METHOD_ID == signature, + None => false, + } + } + pub fn call( + &self, + address: Vec, + ) -> Option<(substreams::scalar::BigInt, substreams::scalar::BigInt)> { + use substreams_ethereum::pb::eth::rpc; + let rpc_calls = rpc::RpcCalls { + calls: vec![ + rpc::RpcCall { to_addr : address, data : self.encode(), } + ], + }; + let responses = substreams_ethereum::rpc::eth_call(&rpc_calls).responses; + let response = responses + .get(0) + .expect("one response should have existed"); + if response.failed { + return None; + } + match Self::output(response.raw.as_ref()) { + Ok(data) => Some(data), + Err(err) => { + use substreams_ethereum::Function; + substreams::log::info!( + "Call output for function `{}` failed to decode with error: {}", + Self::NAME, err + ); + None + } + } + } + } + impl substreams_ethereum::Function for Mint { + const NAME: &'static str = "mint"; + fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + Self::match_call(call) + } + fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::decode(call) + } + fn encode(&self) -> Vec { + self.encode() + } + } + impl substreams_ethereum::rpc::RPCDecodable< + (substreams::scalar::BigInt, substreams::scalar::BigInt), + > for Mint { + fn output( + data: &[u8], + ) -> Result< + (substreams::scalar::BigInt, substreams::scalar::BigInt), + String, + > { + 
Self::output(data) + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct Observations { + pub index: substreams::scalar::BigInt, + } + impl Observations { + const METHOD_ID: [u8; 4] = [37u8, 44u8, 9u8, 215u8]; + pub fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + let maybe_data = call.input.get(4..); + if maybe_data.is_none() { + return Err("no data to decode".to_string()); + } + let mut values = ethabi::decode( + &[ethabi::ParamType::Uint(256usize)], + maybe_data.unwrap(), + ) + .map_err(|e| format!("unable to decode call.input: {:?}", e))?; + values.reverse(); + Ok(Self { + index: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + }) + } + pub fn encode(&self) -> Vec { + let data = ethabi::encode( + &[ + ethabi::Token::Uint( + ethabi::Uint::from_big_endian( + match self.index.clone().to_bytes_be() { + (num_bigint::Sign::Plus, bytes) => bytes, + (num_bigint::Sign::NoSign, bytes) => bytes, + (num_bigint::Sign::Minus, _) => { + panic!("negative numbers are not supported") + } + } + .as_slice(), + ), + ), + ], + ); + let mut encoded = Vec::with_capacity(4 + data.len()); + encoded.extend(Self::METHOD_ID); + encoded.extend(data); + encoded + } + pub fn output_call( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result< + ( + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + bool, + ), + String, + > { + Self::output(call.return_data.as_ref()) + } + pub fn output( + data: &[u8], + ) -> Result< + ( + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + bool, + ), + String, + > { + let mut values = ethabi::decode( + &[ + ethabi::ParamType::Uint(32usize), + ethabi::ParamType::Int(56usize), + ethabi::ParamType::Uint(160usize), + ethabi::ParamType::Bool, + ], + data.as_ref(), + ) + .map_err(|e| 
format!("unable to decode output data: {:?}", e))?; + values.reverse(); + Ok(( + { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_int() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_signed_bytes_be(&v) + }, + { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + values.pop().expect(INTERNAL_ERR).into_bool().expect(INTERNAL_ERR), + )) + } + pub fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + match call.input.get(0..4) { + Some(signature) => Self::METHOD_ID == signature, + None => false, + } + } + pub fn call( + &self, + address: Vec, + ) -> Option< + ( + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + bool, + ), + > { + use substreams_ethereum::pb::eth::rpc; + let rpc_calls = rpc::RpcCalls { + calls: vec![ + rpc::RpcCall { to_addr : address, data : self.encode(), } + ], + }; + let responses = substreams_ethereum::rpc::eth_call(&rpc_calls).responses; + let response = responses + .get(0) + .expect("one response should have existed"); + if response.failed { + return None; + } + match Self::output(response.raw.as_ref()) { + Ok(data) => Some(data), + Err(err) => { + use substreams_ethereum::Function; + substreams::log::info!( + "Call output for function `{}` failed to decode with error: {}", + Self::NAME, err + ); + None + } + } + } + } + impl substreams_ethereum::Function for Observations { + const NAME: &'static str = "observations"; + fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + Self::match_call(call) + } + fn decode( + call: 
&substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::decode(call) + } + fn encode(&self) -> Vec { + self.encode() + } + } + impl substreams_ethereum::rpc::RPCDecodable< + ( + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + bool, + ), + > for Observations { + fn output( + data: &[u8], + ) -> Result< + ( + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + bool, + ), + String, + > { + Self::output(data) + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct Observe { + pub seconds_agos: Vec, + } + impl Observe { + const METHOD_ID: [u8; 4] = [136u8, 59u8, 219u8, 253u8]; + pub fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + let maybe_data = call.input.get(4..); + if maybe_data.is_none() { + return Err("no data to decode".to_string()); + } + let mut values = ethabi::decode( + &[ + ethabi::ParamType::Array( + Box::new(ethabi::ParamType::Uint(32usize)), + ), + ], + maybe_data.unwrap(), + ) + .map_err(|e| format!("unable to decode call.input: {:?}", e))?; + values.reverse(); + Ok(Self { + seconds_agos: values + .pop() + .expect(INTERNAL_ERR) + .into_array() + .expect(INTERNAL_ERR) + .into_iter() + .map(|inner| { + let mut v = [0 as u8; 32]; + inner + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }) + .collect(), + }) + } + pub fn encode(&self) -> Vec { + let data = ethabi::encode( + &[ + { + let v = self + .seconds_agos + .iter() + .map(|inner| ethabi::Token::Uint( + ethabi::Uint::from_big_endian( + match inner.clone().to_bytes_be() { + (num_bigint::Sign::Plus, bytes) => bytes, + (num_bigint::Sign::NoSign, bytes) => bytes, + (num_bigint::Sign::Minus, _) => { + panic!("negative numbers are not supported") + } + } + .as_slice(), + ), + )) + .collect(); + ethabi::Token::Array(v) + }, + ], + ); + let mut encoded = Vec::with_capacity(4 + data.len()); + 
encoded.extend(Self::METHOD_ID); + encoded.extend(data); + encoded + } + pub fn output_call( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result< + (Vec, Vec), + String, + > { + Self::output(call.return_data.as_ref()) + } + pub fn output( + data: &[u8], + ) -> Result< + (Vec, Vec), + String, + > { + let mut values = ethabi::decode( + &[ + ethabi::ParamType::Array( + Box::new(ethabi::ParamType::Int(56usize)), + ), + ethabi::ParamType::Array( + Box::new(ethabi::ParamType::Uint(160usize)), + ), + ], + data.as_ref(), + ) + .map_err(|e| format!("unable to decode output data: {:?}", e))?; + values.reverse(); + Ok(( + values + .pop() + .expect(INTERNAL_ERR) + .into_array() + .expect(INTERNAL_ERR) + .into_iter() + .map(|inner| { + let mut v = [0 as u8; 32]; + inner + .into_int() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_signed_bytes_be(&v) + }) + .collect(), + values + .pop() + .expect(INTERNAL_ERR) + .into_array() + .expect(INTERNAL_ERR) + .into_iter() + .map(|inner| { + let mut v = [0 as u8; 32]; + inner + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }) + .collect(), + )) + } + pub fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + match call.input.get(0..4) { + Some(signature) => Self::METHOD_ID == signature, + None => false, + } + } + pub fn call( + &self, + address: Vec, + ) -> Option< + (Vec, Vec), + > { + use substreams_ethereum::pb::eth::rpc; + let rpc_calls = rpc::RpcCalls { + calls: vec![ + rpc::RpcCall { to_addr : address, data : self.encode(), } + ], + }; + let responses = substreams_ethereum::rpc::eth_call(&rpc_calls).responses; + let response = responses + .get(0) + .expect("one response should have existed"); + if response.failed { + return None; + } + match Self::output(response.raw.as_ref()) { + Ok(data) => Some(data), + Err(err) => { + use substreams_ethereum::Function; + 
substreams::log::info!( + "Call output for function `{}` failed to decode with error: {}", + Self::NAME, err + ); + None + } + } + } + } + impl substreams_ethereum::Function for Observe { + const NAME: &'static str = "observe"; + fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + Self::match_call(call) + } + fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::decode(call) + } + fn encode(&self) -> Vec { + self.encode() + } + } + impl substreams_ethereum::rpc::RPCDecodable< + (Vec, Vec), + > for Observe { + fn output( + data: &[u8], + ) -> Result< + (Vec, Vec), + String, + > { + Self::output(data) + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct Positions { + pub key: [u8; 32usize], + } + impl Positions { + const METHOD_ID: [u8; 4] = [81u8, 78u8, 164u8, 191u8]; + pub fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + let maybe_data = call.input.get(4..); + if maybe_data.is_none() { + return Err("no data to decode".to_string()); + } + let mut values = ethabi::decode( + &[ethabi::ParamType::FixedBytes(32usize)], + maybe_data.unwrap(), + ) + .map_err(|e| format!("unable to decode call.input: {:?}", e))?; + values.reverse(); + Ok(Self { + key: { + let mut result = [0u8; 32]; + let v = values + .pop() + .expect(INTERNAL_ERR) + .into_fixed_bytes() + .expect(INTERNAL_ERR); + result.copy_from_slice(&v); + result + }, + }) + } + pub fn encode(&self) -> Vec { + let data = ethabi::encode( + &[ethabi::Token::FixedBytes(self.key.as_ref().to_vec())], + ); + let mut encoded = Vec::with_capacity(4 + data.len()); + encoded.extend(Self::METHOD_ID); + encoded.extend(data); + encoded + } + pub fn output_call( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result< + ( + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + ), + String, + > { + Self::output(call.return_data.as_ref()) + } + pub fn 
output( + data: &[u8], + ) -> Result< + ( + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + ), + String, + > { + let mut values = ethabi::decode( + &[ + ethabi::ParamType::Uint(128usize), + ethabi::ParamType::Uint(256usize), + ethabi::ParamType::Uint(256usize), + ethabi::ParamType::Uint(128usize), + ethabi::ParamType::Uint(128usize), + ], + data.as_ref(), + ) + .map_err(|e| format!("unable to decode output data: {:?}", e))?; + values.reverse(); + Ok(( + { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + )) + } + pub fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + match call.input.get(0..4) { + Some(signature) => Self::METHOD_ID == signature, + None => false, + } + } + pub fn call( + &self, + address: Vec, + ) -> Option< + ( + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + 
substreams::scalar::BigInt, + ), + > { + use substreams_ethereum::pb::eth::rpc; + let rpc_calls = rpc::RpcCalls { + calls: vec![ + rpc::RpcCall { to_addr : address, data : self.encode(), } + ], + }; + let responses = substreams_ethereum::rpc::eth_call(&rpc_calls).responses; + let response = responses + .get(0) + .expect("one response should have existed"); + if response.failed { + return None; + } + match Self::output(response.raw.as_ref()) { + Ok(data) => Some(data), + Err(err) => { + use substreams_ethereum::Function; + substreams::log::info!( + "Call output for function `{}` failed to decode with error: {}", + Self::NAME, err + ); + None + } + } + } + } + impl substreams_ethereum::Function for Positions { + const NAME: &'static str = "positions"; + fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + Self::match_call(call) + } + fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::decode(call) + } + fn encode(&self) -> Vec { + self.encode() + } + } + impl substreams_ethereum::rpc::RPCDecodable< + ( + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + ), + > for Positions { + fn output( + data: &[u8], + ) -> Result< + ( + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + ), + String, + > { + Self::output(data) + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct ProtocolFees {} + impl ProtocolFees { + const METHOD_ID: [u8; 4] = [26u8, 216u8, 176u8, 59u8]; + pub fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Ok(Self {}) + } + pub fn encode(&self) -> Vec { + let data = ethabi::encode(&[]); + let mut encoded = Vec::with_capacity(4 + data.len()); + encoded.extend(Self::METHOD_ID); + encoded.extend(data); + encoded + } + pub fn output_call( + call: 
&substreams_ethereum::pb::eth::v2::Call, + ) -> Result< + (substreams::scalar::BigInt, substreams::scalar::BigInt), + String, + > { + Self::output(call.return_data.as_ref()) + } + pub fn output( + data: &[u8], + ) -> Result< + (substreams::scalar::BigInt, substreams::scalar::BigInt), + String, + > { + let mut values = ethabi::decode( + &[ + ethabi::ParamType::Uint(128usize), + ethabi::ParamType::Uint(128usize), + ], + data.as_ref(), + ) + .map_err(|e| format!("unable to decode output data: {:?}", e))?; + values.reverse(); + Ok(( + { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + )) + } + pub fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + match call.input.get(0..4) { + Some(signature) => Self::METHOD_ID == signature, + None => false, + } + } + pub fn call( + &self, + address: Vec, + ) -> Option<(substreams::scalar::BigInt, substreams::scalar::BigInt)> { + use substreams_ethereum::pb::eth::rpc; + let rpc_calls = rpc::RpcCalls { + calls: vec![ + rpc::RpcCall { to_addr : address, data : self.encode(), } + ], + }; + let responses = substreams_ethereum::rpc::eth_call(&rpc_calls).responses; + let response = responses + .get(0) + .expect("one response should have existed"); + if response.failed { + return None; + } + match Self::output(response.raw.as_ref()) { + Ok(data) => Some(data), + Err(err) => { + use substreams_ethereum::Function; + substreams::log::info!( + "Call output for function `{}` failed to decode with error: {}", + Self::NAME, err + ); + None + } + } + } + } + impl substreams_ethereum::Function for ProtocolFees { + const NAME: &'static str = "protocolFees"; + fn 
match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + Self::match_call(call) + } + fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::decode(call) + } + fn encode(&self) -> Vec { + self.encode() + } + } + impl substreams_ethereum::rpc::RPCDecodable< + (substreams::scalar::BigInt, substreams::scalar::BigInt), + > for ProtocolFees { + fn output( + data: &[u8], + ) -> Result< + (substreams::scalar::BigInt, substreams::scalar::BigInt), + String, + > { + Self::output(data) + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct SetFeeProtocol { + pub fee_protocol0: substreams::scalar::BigInt, + pub fee_protocol1: substreams::scalar::BigInt, + } + impl SetFeeProtocol { + const METHOD_ID: [u8; 4] = [130u8, 6u8, 164u8, 209u8]; + pub fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + let maybe_data = call.input.get(4..); + if maybe_data.is_none() { + return Err("no data to decode".to_string()); + } + let mut values = ethabi::decode( + &[ + ethabi::ParamType::Uint(8usize), + ethabi::ParamType::Uint(8usize), + ], + maybe_data.unwrap(), + ) + .map_err(|e| format!("unable to decode call.input: {:?}", e))?; + values.reverse(); + Ok(Self { + fee_protocol0: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + fee_protocol1: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + }) + } + pub fn encode(&self) -> Vec { + let data = ethabi::encode( + &[ + ethabi::Token::Uint( + ethabi::Uint::from_big_endian( + match self.fee_protocol0.clone().to_bytes_be() { + (num_bigint::Sign::Plus, bytes) => bytes, + (num_bigint::Sign::NoSign, bytes) => bytes, + (num_bigint::Sign::Minus, _) => { + 
panic!("negative numbers are not supported") + } + } + .as_slice(), + ), + ), + ethabi::Token::Uint( + ethabi::Uint::from_big_endian( + match self.fee_protocol1.clone().to_bytes_be() { + (num_bigint::Sign::Plus, bytes) => bytes, + (num_bigint::Sign::NoSign, bytes) => bytes, + (num_bigint::Sign::Minus, _) => { + panic!("negative numbers are not supported") + } + } + .as_slice(), + ), + ), + ], + ); + let mut encoded = Vec::with_capacity(4 + data.len()); + encoded.extend(Self::METHOD_ID); + encoded.extend(data); + encoded + } + pub fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + match call.input.get(0..4) { + Some(signature) => Self::METHOD_ID == signature, + None => false, + } + } + } + impl substreams_ethereum::Function for SetFeeProtocol { + const NAME: &'static str = "setFeeProtocol"; + fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + Self::match_call(call) + } + fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::decode(call) + } + fn encode(&self) -> Vec { + self.encode() + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct Slot0 {} + impl Slot0 { + const METHOD_ID: [u8; 4] = [56u8, 80u8, 199u8, 189u8]; + pub fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Ok(Self {}) + } + pub fn encode(&self) -> Vec { + let data = ethabi::encode(&[]); + let mut encoded = Vec::with_capacity(4 + data.len()); + encoded.extend(Self::METHOD_ID); + encoded.extend(data); + encoded + } + pub fn output_call( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result< + ( + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + bool, + ), + String, + > { + Self::output(call.return_data.as_ref()) + } + pub fn output( + data: &[u8], + ) -> Result< + ( + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + 
substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + bool, + ), + String, + > { + let mut values = ethabi::decode( + &[ + ethabi::ParamType::Uint(160usize), + ethabi::ParamType::Int(24usize), + ethabi::ParamType::Uint(16usize), + ethabi::ParamType::Uint(16usize), + ethabi::ParamType::Uint(16usize), + ethabi::ParamType::Uint(8usize), + ethabi::ParamType::Bool, + ], + data.as_ref(), + ) + .map_err(|e| format!("unable to decode output data: {:?}", e))?; + values.reverse(); + Ok(( + { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_int() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_signed_bytes_be(&v) + }, + { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + values.pop().expect(INTERNAL_ERR).into_bool().expect(INTERNAL_ERR), + )) + } + pub fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + match call.input.get(0..4) { + Some(signature) => 
Self::METHOD_ID == signature, + None => false, + } + } + pub fn call( + &self, + address: Vec, + ) -> Option< + ( + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + bool, + ), + > { + use substreams_ethereum::pb::eth::rpc; + let rpc_calls = rpc::RpcCalls { + calls: vec![ + rpc::RpcCall { to_addr : address, data : self.encode(), } + ], + }; + let responses = substreams_ethereum::rpc::eth_call(&rpc_calls).responses; + let response = responses + .get(0) + .expect("one response should have existed"); + if response.failed { + return None; + } + match Self::output(response.raw.as_ref()) { + Ok(data) => Some(data), + Err(err) => { + use substreams_ethereum::Function; + substreams::log::info!( + "Call output for function `{}` failed to decode with error: {}", + Self::NAME, err + ); + None + } + } + } + } + impl substreams_ethereum::Function for Slot0 { + const NAME: &'static str = "slot0"; + fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + Self::match_call(call) + } + fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::decode(call) + } + fn encode(&self) -> Vec { + self.encode() + } + } + impl substreams_ethereum::rpc::RPCDecodable< + ( + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + bool, + ), + > for Slot0 { + fn output( + data: &[u8], + ) -> Result< + ( + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + bool, + ), + String, + > { + Self::output(data) + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct SnapshotCumulativesInside { + pub tick_lower: substreams::scalar::BigInt, + pub tick_upper: substreams::scalar::BigInt, + 
} + impl SnapshotCumulativesInside { + const METHOD_ID: [u8; 4] = [163u8, 136u8, 7u8, 242u8]; + pub fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + let maybe_data = call.input.get(4..); + if maybe_data.is_none() { + return Err("no data to decode".to_string()); + } + let mut values = ethabi::decode( + &[ + ethabi::ParamType::Int(24usize), + ethabi::ParamType::Int(24usize), + ], + maybe_data.unwrap(), + ) + .map_err(|e| format!("unable to decode call.input: {:?}", e))?; + values.reverse(); + Ok(Self { + tick_lower: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_int() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_signed_bytes_be(&v) + }, + tick_upper: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_int() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_signed_bytes_be(&v) + }, + }) + } + pub fn encode(&self) -> Vec { + let data = ethabi::encode( + &[ + { + let non_full_signed_bytes = self + .tick_lower + .to_signed_bytes_be(); + let full_signed_bytes_init = if non_full_signed_bytes[0] + & 0x80 == 0x80 + { + 0xff + } else { + 0x00 + }; + let mut full_signed_bytes = [full_signed_bytes_init + as u8; 32]; + non_full_signed_bytes + .into_iter() + .rev() + .enumerate() + .for_each(|(i, byte)| full_signed_bytes[31 - i] = byte); + ethabi::Token::Int( + ethabi::Int::from_big_endian(full_signed_bytes.as_ref()), + ) + }, + { + let non_full_signed_bytes = self + .tick_upper + .to_signed_bytes_be(); + let full_signed_bytes_init = if non_full_signed_bytes[0] + & 0x80 == 0x80 + { + 0xff + } else { + 0x00 + }; + let mut full_signed_bytes = [full_signed_bytes_init + as u8; 32]; + non_full_signed_bytes + .into_iter() + .rev() + .enumerate() + .for_each(|(i, byte)| full_signed_bytes[31 - i] = byte); + ethabi::Token::Int( + ethabi::Int::from_big_endian(full_signed_bytes.as_ref()), + ) + }, + ], + ); 
+ let mut encoded = Vec::with_capacity(4 + data.len()); + encoded.extend(Self::METHOD_ID); + encoded.extend(data); + encoded + } + pub fn output_call( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result< + ( + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + ), + String, + > { + Self::output(call.return_data.as_ref()) + } + pub fn output( + data: &[u8], + ) -> Result< + ( + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + ), + String, + > { + let mut values = ethabi::decode( + &[ + ethabi::ParamType::Int(56usize), + ethabi::ParamType::Uint(160usize), + ethabi::ParamType::Uint(32usize), + ], + data.as_ref(), + ) + .map_err(|e| format!("unable to decode output data: {:?}", e))?; + values.reverse(); + Ok(( + { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_int() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_signed_bytes_be(&v) + }, + { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + )) + } + pub fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + match call.input.get(0..4) { + Some(signature) => Self::METHOD_ID == signature, + None => false, + } + } + pub fn call( + &self, + address: Vec, + ) -> Option< + ( + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + ), + > { + use substreams_ethereum::pb::eth::rpc; + let rpc_calls = rpc::RpcCalls { + calls: vec![ + rpc::RpcCall { to_addr : address, data : self.encode(), } + ], + }; + let responses = 
substreams_ethereum::rpc::eth_call(&rpc_calls).responses; + let response = responses + .get(0) + .expect("one response should have existed"); + if response.failed { + return None; + } + match Self::output(response.raw.as_ref()) { + Ok(data) => Some(data), + Err(err) => { + use substreams_ethereum::Function; + substreams::log::info!( + "Call output for function `{}` failed to decode with error: {}", + Self::NAME, err + ); + None + } + } + } + } + impl substreams_ethereum::Function for SnapshotCumulativesInside { + const NAME: &'static str = "snapshotCumulativesInside"; + fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + Self::match_call(call) + } + fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::decode(call) + } + fn encode(&self) -> Vec { + self.encode() + } + } + impl substreams_ethereum::rpc::RPCDecodable< + ( + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + ), + > for SnapshotCumulativesInside { + fn output( + data: &[u8], + ) -> Result< + ( + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + ), + String, + > { + Self::output(data) + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct Swap { + pub recipient: Vec, + pub zero_for_one: bool, + pub amount_specified: substreams::scalar::BigInt, + pub sqrt_price_limit_x96: substreams::scalar::BigInt, + pub data: Vec, + } + impl Swap { + const METHOD_ID: [u8; 4] = [18u8, 138u8, 203u8, 8u8]; + pub fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + let maybe_data = call.input.get(4..); + if maybe_data.is_none() { + return Err("no data to decode".to_string()); + } + let mut values = ethabi::decode( + &[ + ethabi::ParamType::Address, + ethabi::ParamType::Bool, + ethabi::ParamType::Int(256usize), + ethabi::ParamType::Uint(160usize), + ethabi::ParamType::Bytes, + ], + maybe_data.unwrap(), + ) + .map_err(|e| format!("unable to decode call.input: {:?}", 
e))?; + values.reverse(); + Ok(Self { + recipient: values + .pop() + .expect(INTERNAL_ERR) + .into_address() + .expect(INTERNAL_ERR) + .as_bytes() + .to_vec(), + zero_for_one: values + .pop() + .expect(INTERNAL_ERR) + .into_bool() + .expect(INTERNAL_ERR), + amount_specified: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_int() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_signed_bytes_be(&v) + }, + sqrt_price_limit_x96: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + data: values + .pop() + .expect(INTERNAL_ERR) + .into_bytes() + .expect(INTERNAL_ERR), + }) + } + pub fn encode(&self) -> Vec { + let data = ethabi::encode( + &[ + ethabi::Token::Address( + ethabi::Address::from_slice(&self.recipient), + ), + ethabi::Token::Bool(self.zero_for_one.clone()), + { + let non_full_signed_bytes = self + .amount_specified + .to_signed_bytes_be(); + let full_signed_bytes_init = if non_full_signed_bytes[0] + & 0x80 == 0x80 + { + 0xff + } else { + 0x00 + }; + let mut full_signed_bytes = [full_signed_bytes_init + as u8; 32]; + non_full_signed_bytes + .into_iter() + .rev() + .enumerate() + .for_each(|(i, byte)| full_signed_bytes[31 - i] = byte); + ethabi::Token::Int( + ethabi::Int::from_big_endian(full_signed_bytes.as_ref()), + ) + }, + ethabi::Token::Uint( + ethabi::Uint::from_big_endian( + match self.sqrt_price_limit_x96.clone().to_bytes_be() { + (num_bigint::Sign::Plus, bytes) => bytes, + (num_bigint::Sign::NoSign, bytes) => bytes, + (num_bigint::Sign::Minus, _) => { + panic!("negative numbers are not supported") + } + } + .as_slice(), + ), + ), + ethabi::Token::Bytes(self.data.clone()), + ], + ); + let mut encoded = Vec::with_capacity(4 + data.len()); + encoded.extend(Self::METHOD_ID); + encoded.extend(data); + encoded + } + pub fn 
output_call( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result< + (substreams::scalar::BigInt, substreams::scalar::BigInt), + String, + > { + Self::output(call.return_data.as_ref()) + } + pub fn output( + data: &[u8], + ) -> Result< + (substreams::scalar::BigInt, substreams::scalar::BigInt), + String, + > { + let mut values = ethabi::decode( + &[ + ethabi::ParamType::Int(256usize), + ethabi::ParamType::Int(256usize), + ], + data.as_ref(), + ) + .map_err(|e| format!("unable to decode output data: {:?}", e))?; + values.reverse(); + Ok(( + { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_int() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_signed_bytes_be(&v) + }, + { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_int() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_signed_bytes_be(&v) + }, + )) + } + pub fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + match call.input.get(0..4) { + Some(signature) => Self::METHOD_ID == signature, + None => false, + } + } + pub fn call( + &self, + address: Vec, + ) -> Option<(substreams::scalar::BigInt, substreams::scalar::BigInt)> { + use substreams_ethereum::pb::eth::rpc; + let rpc_calls = rpc::RpcCalls { + calls: vec![ + rpc::RpcCall { to_addr : address, data : self.encode(), } + ], + }; + let responses = substreams_ethereum::rpc::eth_call(&rpc_calls).responses; + let response = responses + .get(0) + .expect("one response should have existed"); + if response.failed { + return None; + } + match Self::output(response.raw.as_ref()) { + Ok(data) => Some(data), + Err(err) => { + use substreams_ethereum::Function; + substreams::log::info!( + "Call output for function `{}` failed to decode with error: {}", + Self::NAME, err + ); + None + } + } + } + } + impl substreams_ethereum::Function for Swap { + const NAME: &'static str = "swap"; + fn 
match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + Self::match_call(call) + } + fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::decode(call) + } + fn encode(&self) -> Vec { + self.encode() + } + } + impl substreams_ethereum::rpc::RPCDecodable< + (substreams::scalar::BigInt, substreams::scalar::BigInt), + > for Swap { + fn output( + data: &[u8], + ) -> Result< + (substreams::scalar::BigInt, substreams::scalar::BigInt), + String, + > { + Self::output(data) + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct TickBitmap { + pub word_position: substreams::scalar::BigInt, + } + impl TickBitmap { + const METHOD_ID: [u8; 4] = [83u8, 57u8, 194u8, 150u8]; + pub fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + let maybe_data = call.input.get(4..); + if maybe_data.is_none() { + return Err("no data to decode".to_string()); + } + let mut values = ethabi::decode( + &[ethabi::ParamType::Int(16usize)], + maybe_data.unwrap(), + ) + .map_err(|e| format!("unable to decode call.input: {:?}", e))?; + values.reverse(); + Ok(Self { + word_position: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_int() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_signed_bytes_be(&v) + }, + }) + } + pub fn encode(&self) -> Vec { + let data = ethabi::encode( + &[ + { + let non_full_signed_bytes = self + .word_position + .to_signed_bytes_be(); + let full_signed_bytes_init = if non_full_signed_bytes[0] + & 0x80 == 0x80 + { + 0xff + } else { + 0x00 + }; + let mut full_signed_bytes = [full_signed_bytes_init + as u8; 32]; + non_full_signed_bytes + .into_iter() + .rev() + .enumerate() + .for_each(|(i, byte)| full_signed_bytes[31 - i] = byte); + ethabi::Token::Int( + ethabi::Int::from_big_endian(full_signed_bytes.as_ref()), + ) + }, + ], + ); + let mut encoded = Vec::with_capacity(4 + data.len()); + encoded.extend(Self::METHOD_ID); + 
encoded.extend(data); + encoded + } + pub fn output_call( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::output(call.return_data.as_ref()) + } + pub fn output(data: &[u8]) -> Result { + let mut values = ethabi::decode( + &[ethabi::ParamType::Uint(256usize)], + data.as_ref(), + ) + .map_err(|e| format!("unable to decode output data: {:?}", e))?; + Ok({ + let mut v = [0 as u8; 32]; + values + .pop() + .expect("one output data should have existed") + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }) + } + pub fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + match call.input.get(0..4) { + Some(signature) => Self::METHOD_ID == signature, + None => false, + } + } + pub fn call(&self, address: Vec) -> Option { + use substreams_ethereum::pb::eth::rpc; + let rpc_calls = rpc::RpcCalls { + calls: vec![ + rpc::RpcCall { to_addr : address, data : self.encode(), } + ], + }; + let responses = substreams_ethereum::rpc::eth_call(&rpc_calls).responses; + let response = responses + .get(0) + .expect("one response should have existed"); + if response.failed { + return None; + } + match Self::output(response.raw.as_ref()) { + Ok(data) => Some(data), + Err(err) => { + use substreams_ethereum::Function; + substreams::log::info!( + "Call output for function `{}` failed to decode with error: {}", + Self::NAME, err + ); + None + } + } + } + } + impl substreams_ethereum::Function for TickBitmap { + const NAME: &'static str = "tickBitmap"; + fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + Self::match_call(call) + } + fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::decode(call) + } + fn encode(&self) -> Vec { + self.encode() + } + } + impl substreams_ethereum::rpc::RPCDecodable + for TickBitmap { + fn output(data: &[u8]) -> Result { + Self::output(data) + } + } + #[derive(Debug, Clone, PartialEq)] + 
pub struct TickSpacing {} + impl TickSpacing { + const METHOD_ID: [u8; 4] = [208u8, 201u8, 58u8, 124u8]; + pub fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Ok(Self {}) + } + pub fn encode(&self) -> Vec { + let data = ethabi::encode(&[]); + let mut encoded = Vec::with_capacity(4 + data.len()); + encoded.extend(Self::METHOD_ID); + encoded.extend(data); + encoded + } + pub fn output_call( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::output(call.return_data.as_ref()) + } + pub fn output(data: &[u8]) -> Result { + let mut values = ethabi::decode( + &[ethabi::ParamType::Int(24usize)], + data.as_ref(), + ) + .map_err(|e| format!("unable to decode output data: {:?}", e))?; + Ok({ + let mut v = [0 as u8; 32]; + values + .pop() + .expect("one output data should have existed") + .into_int() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_signed_bytes_be(&v) + }) + } + pub fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + match call.input.get(0..4) { + Some(signature) => Self::METHOD_ID == signature, + None => false, + } + } + pub fn call(&self, address: Vec) -> Option { + use substreams_ethereum::pb::eth::rpc; + let rpc_calls = rpc::RpcCalls { + calls: vec![ + rpc::RpcCall { to_addr : address, data : self.encode(), } + ], + }; + let responses = substreams_ethereum::rpc::eth_call(&rpc_calls).responses; + let response = responses + .get(0) + .expect("one response should have existed"); + if response.failed { + return None; + } + match Self::output(response.raw.as_ref()) { + Ok(data) => Some(data), + Err(err) => { + use substreams_ethereum::Function; + substreams::log::info!( + "Call output for function `{}` failed to decode with error: {}", + Self::NAME, err + ); + None + } + } + } + } + impl substreams_ethereum::Function for TickSpacing { + const NAME: &'static str = "tickSpacing"; + fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> 
bool { + Self::match_call(call) + } + fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::decode(call) + } + fn encode(&self) -> Vec { + self.encode() + } + } + impl substreams_ethereum::rpc::RPCDecodable + for TickSpacing { + fn output(data: &[u8]) -> Result { + Self::output(data) + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct Ticks { + pub tick: substreams::scalar::BigInt, + } + impl Ticks { + const METHOD_ID: [u8; 4] = [243u8, 13u8, 186u8, 147u8]; + pub fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + let maybe_data = call.input.get(4..); + if maybe_data.is_none() { + return Err("no data to decode".to_string()); + } + let mut values = ethabi::decode( + &[ethabi::ParamType::Int(24usize)], + maybe_data.unwrap(), + ) + .map_err(|e| format!("unable to decode call.input: {:?}", e))?; + values.reverse(); + Ok(Self { + tick: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_int() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_signed_bytes_be(&v) + }, + }) + } + pub fn encode(&self) -> Vec { + let data = ethabi::encode( + &[ + { + let non_full_signed_bytes = self.tick.to_signed_bytes_be(); + let full_signed_bytes_init = if non_full_signed_bytes[0] + & 0x80 == 0x80 + { + 0xff + } else { + 0x00 + }; + let mut full_signed_bytes = [full_signed_bytes_init + as u8; 32]; + non_full_signed_bytes + .into_iter() + .rev() + .enumerate() + .for_each(|(i, byte)| full_signed_bytes[31 - i] = byte); + ethabi::Token::Int( + ethabi::Int::from_big_endian(full_signed_bytes.as_ref()), + ) + }, + ], + ); + let mut encoded = Vec::with_capacity(4 + data.len()); + encoded.extend(Self::METHOD_ID); + encoded.extend(data); + encoded + } + pub fn output_call( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result< + ( + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + 
substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + bool, + ), + String, + > { + Self::output(call.return_data.as_ref()) + } + pub fn output( + data: &[u8], + ) -> Result< + ( + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + bool, + ), + String, + > { + let mut values = ethabi::decode( + &[ + ethabi::ParamType::Uint(128usize), + ethabi::ParamType::Int(128usize), + ethabi::ParamType::Uint(256usize), + ethabi::ParamType::Uint(256usize), + ethabi::ParamType::Int(56usize), + ethabi::ParamType::Uint(160usize), + ethabi::ParamType::Uint(32usize), + ethabi::ParamType::Bool, + ], + data.as_ref(), + ) + .map_err(|e| format!("unable to decode output data: {:?}", e))?; + values.reverse(); + Ok(( + { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_int() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_signed_bytes_be(&v) + }, + { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_int() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_signed_bytes_be(&v) + }, + { + let mut v = [0 as u8; 32]; + values + .pop() 
+ .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + values.pop().expect(INTERNAL_ERR).into_bool().expect(INTERNAL_ERR), + )) + } + pub fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + match call.input.get(0..4) { + Some(signature) => Self::METHOD_ID == signature, + None => false, + } + } + pub fn call( + &self, + address: Vec, + ) -> Option< + ( + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + bool, + ), + > { + use substreams_ethereum::pb::eth::rpc; + let rpc_calls = rpc::RpcCalls { + calls: vec![ + rpc::RpcCall { to_addr : address, data : self.encode(), } + ], + }; + let responses = substreams_ethereum::rpc::eth_call(&rpc_calls).responses; + let response = responses + .get(0) + .expect("one response should have existed"); + if response.failed { + return None; + } + match Self::output(response.raw.as_ref()) { + Ok(data) => Some(data), + Err(err) => { + use substreams_ethereum::Function; + substreams::log::info!( + "Call output for function `{}` failed to decode with error: {}", + Self::NAME, err + ); + None + } + } + } + } + impl substreams_ethereum::Function for Ticks { + const NAME: &'static str = "ticks"; + fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + Self::match_call(call) + } + fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::decode(call) + } + fn encode(&self) -> Vec { + self.encode() + } + } + impl substreams_ethereum::rpc::RPCDecodable< + ( + substreams::scalar::BigInt, + 
substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + bool, + ), + > for Ticks { + fn output( + data: &[u8], + ) -> Result< + ( + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + bool, + ), + String, + > { + Self::output(data) + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct Token0 {} + impl Token0 { + const METHOD_ID: [u8; 4] = [13u8, 254u8, 22u8, 129u8]; + pub fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Ok(Self {}) + } + pub fn encode(&self) -> Vec { + let data = ethabi::encode(&[]); + let mut encoded = Vec::with_capacity(4 + data.len()); + encoded.extend(Self::METHOD_ID); + encoded.extend(data); + encoded + } + pub fn output_call( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result, String> { + Self::output(call.return_data.as_ref()) + } + pub fn output(data: &[u8]) -> Result, String> { + let mut values = ethabi::decode( + &[ethabi::ParamType::Address], + data.as_ref(), + ) + .map_err(|e| format!("unable to decode output data: {:?}", e))?; + Ok( + values + .pop() + .expect("one output data should have existed") + .into_address() + .expect(INTERNAL_ERR) + .as_bytes() + .to_vec(), + ) + } + pub fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + match call.input.get(0..4) { + Some(signature) => Self::METHOD_ID == signature, + None => false, + } + } + pub fn call(&self, address: Vec) -> Option> { + use substreams_ethereum::pb::eth::rpc; + let rpc_calls = rpc::RpcCalls { + calls: vec![ + rpc::RpcCall { to_addr : address, data : self.encode(), } + ], + }; + let responses = substreams_ethereum::rpc::eth_call(&rpc_calls).responses; + let response = responses + .get(0) + .expect("one response should have existed"); 
+ if response.failed { + return None; + } + match Self::output(response.raw.as_ref()) { + Ok(data) => Some(data), + Err(err) => { + use substreams_ethereum::Function; + substreams::log::info!( + "Call output for function `{}` failed to decode with error: {}", + Self::NAME, err + ); + None + } + } + } + } + impl substreams_ethereum::Function for Token0 { + const NAME: &'static str = "token0"; + fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + Self::match_call(call) + } + fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::decode(call) + } + fn encode(&self) -> Vec { + self.encode() + } + } + impl substreams_ethereum::rpc::RPCDecodable> for Token0 { + fn output(data: &[u8]) -> Result, String> { + Self::output(data) + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct Token1 {} + impl Token1 { + const METHOD_ID: [u8; 4] = [210u8, 18u8, 32u8, 167u8]; + pub fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Ok(Self {}) + } + pub fn encode(&self) -> Vec { + let data = ethabi::encode(&[]); + let mut encoded = Vec::with_capacity(4 + data.len()); + encoded.extend(Self::METHOD_ID); + encoded.extend(data); + encoded + } + pub fn output_call( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result, String> { + Self::output(call.return_data.as_ref()) + } + pub fn output(data: &[u8]) -> Result, String> { + let mut values = ethabi::decode( + &[ethabi::ParamType::Address], + data.as_ref(), + ) + .map_err(|e| format!("unable to decode output data: {:?}", e))?; + Ok( + values + .pop() + .expect("one output data should have existed") + .into_address() + .expect(INTERNAL_ERR) + .as_bytes() + .to_vec(), + ) + } + pub fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + match call.input.get(0..4) { + Some(signature) => Self::METHOD_ID == signature, + None => false, + } + } + pub fn call(&self, address: Vec) -> Option> { + use substreams_ethereum::pb::eth::rpc; + let 
rpc_calls = rpc::RpcCalls { + calls: vec![ + rpc::RpcCall { to_addr : address, data : self.encode(), } + ], + }; + let responses = substreams_ethereum::rpc::eth_call(&rpc_calls).responses; + let response = responses + .get(0) + .expect("one response should have existed"); + if response.failed { + return None; + } + match Self::output(response.raw.as_ref()) { + Ok(data) => Some(data), + Err(err) => { + use substreams_ethereum::Function; + substreams::log::info!( + "Call output for function `{}` failed to decode with error: {}", + Self::NAME, err + ); + None + } + } + } + } + impl substreams_ethereum::Function for Token1 { + const NAME: &'static str = "token1"; + fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + Self::match_call(call) + } + fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::decode(call) + } + fn encode(&self) -> Vec { + self.encode() + } + } + impl substreams_ethereum::rpc::RPCDecodable> for Token1 { + fn output(data: &[u8]) -> Result, String> { + Self::output(data) + } + } + } + /// Contract's events. 
+ #[allow(dead_code, unused_imports, unused_variables)] + pub mod events { + use super::INTERNAL_ERR; + #[derive(Debug, Clone, PartialEq)] + pub struct Burn { + pub owner: Vec, + pub tick_lower: substreams::scalar::BigInt, + pub tick_upper: substreams::scalar::BigInt, + pub amount: substreams::scalar::BigInt, + pub amount0: substreams::scalar::BigInt, + pub amount1: substreams::scalar::BigInt, + } + impl Burn { + const TOPIC_ID: [u8; 32] = [ + 12u8, + 57u8, + 108u8, + 217u8, + 137u8, + 163u8, + 159u8, + 68u8, + 89u8, + 181u8, + 250u8, + 26u8, + 237u8, + 106u8, + 154u8, + 141u8, + 205u8, + 188u8, + 69u8, + 144u8, + 138u8, + 207u8, + 214u8, + 126u8, + 2u8, + 140u8, + 213u8, + 104u8, + 218u8, + 152u8, + 152u8, + 44u8, + ]; + pub fn match_log(log: &substreams_ethereum::pb::eth::v2::Log) -> bool { + if log.topics.len() != 4usize { + return false; + } + if log.data.len() != 96usize { + return false; + } + return log.topics.get(0).expect("bounds already checked").as_ref() + == Self::TOPIC_ID; + } + pub fn decode( + log: &substreams_ethereum::pb::eth::v2::Log, + ) -> Result { + let mut values = ethabi::decode( + &[ + ethabi::ParamType::Uint(128usize), + ethabi::ParamType::Uint(256usize), + ethabi::ParamType::Uint(256usize), + ], + log.data.as_ref(), + ) + .map_err(|e| format!("unable to decode log.data: {:?}", e))?; + values.reverse(); + Ok(Self { + owner: ethabi::decode( + &[ethabi::ParamType::Address], + log.topics[1usize].as_ref(), + ) + .map_err(|e| format!( + "unable to decode param 'owner' from topic of type 'address': {:?}", + e + ))? 
+ .pop() + .expect(INTERNAL_ERR) + .into_address() + .expect(INTERNAL_ERR) + .as_bytes() + .to_vec(), + tick_lower: substreams::scalar::BigInt::from_signed_bytes_be( + log.topics[2usize].as_ref(), + ), + tick_upper: substreams::scalar::BigInt::from_signed_bytes_be( + log.topics[3usize].as_ref(), + ), + amount: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + amount0: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + amount1: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + }) + } + } + impl substreams_ethereum::Event for Burn { + const NAME: &'static str = "Burn"; + fn match_log(log: &substreams_ethereum::pb::eth::v2::Log) -> bool { + Self::match_log(log) + } + fn decode( + log: &substreams_ethereum::pb::eth::v2::Log, + ) -> Result { + Self::decode(log) + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct Collect { + pub owner: Vec, + pub recipient: Vec, + pub tick_lower: substreams::scalar::BigInt, + pub tick_upper: substreams::scalar::BigInt, + pub amount0: substreams::scalar::BigInt, + pub amount1: substreams::scalar::BigInt, + } + impl Collect { + const TOPIC_ID: [u8; 32] = [ + 112u8, + 147u8, + 83u8, + 56u8, + 230u8, + 151u8, + 117u8, + 69u8, + 106u8, + 133u8, + 221u8, + 239u8, + 34u8, + 108u8, + 57u8, + 95u8, + 182u8, + 104u8, + 182u8, + 63u8, + 160u8, + 17u8, + 95u8, + 95u8, + 32u8, + 97u8, + 11u8, + 56u8, + 142u8, + 108u8, + 169u8, + 192u8, + ]; + pub fn match_log(log: &substreams_ethereum::pb::eth::v2::Log) -> bool { + if log.topics.len() != 4usize { + return false; + } + 
if log.data.len() != 96usize { + return false; + } + return log.topics.get(0).expect("bounds already checked").as_ref() + == Self::TOPIC_ID; + } + pub fn decode( + log: &substreams_ethereum::pb::eth::v2::Log, + ) -> Result { + let mut values = ethabi::decode( + &[ + ethabi::ParamType::Address, + ethabi::ParamType::Uint(128usize), + ethabi::ParamType::Uint(128usize), + ], + log.data.as_ref(), + ) + .map_err(|e| format!("unable to decode log.data: {:?}", e))?; + values.reverse(); + Ok(Self { + owner: ethabi::decode( + &[ethabi::ParamType::Address], + log.topics[1usize].as_ref(), + ) + .map_err(|e| format!( + "unable to decode param 'owner' from topic of type 'address': {:?}", + e + ))? + .pop() + .expect(INTERNAL_ERR) + .into_address() + .expect(INTERNAL_ERR) + .as_bytes() + .to_vec(), + tick_lower: substreams::scalar::BigInt::from_signed_bytes_be( + log.topics[2usize].as_ref(), + ), + tick_upper: substreams::scalar::BigInt::from_signed_bytes_be( + log.topics[3usize].as_ref(), + ), + recipient: values + .pop() + .expect(INTERNAL_ERR) + .into_address() + .expect(INTERNAL_ERR) + .as_bytes() + .to_vec(), + amount0: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + amount1: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + }) + } + } + impl substreams_ethereum::Event for Collect { + const NAME: &'static str = "Collect"; + fn match_log(log: &substreams_ethereum::pb::eth::v2::Log) -> bool { + Self::match_log(log) + } + fn decode( + log: &substreams_ethereum::pb::eth::v2::Log, + ) -> Result { + Self::decode(log) + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct CollectProtocol { + pub sender: Vec, + pub recipient: Vec, + pub amount0: 
substreams::scalar::BigInt, + pub amount1: substreams::scalar::BigInt, + } + impl CollectProtocol { + const TOPIC_ID: [u8; 32] = [ + 89u8, + 107u8, + 87u8, + 57u8, + 6u8, + 33u8, + 141u8, + 52u8, + 17u8, + 133u8, + 11u8, + 38u8, + 166u8, + 180u8, + 55u8, + 214u8, + 196u8, + 82u8, + 47u8, + 219u8, + 67u8, + 210u8, + 210u8, + 56u8, + 98u8, + 99u8, + 248u8, + 109u8, + 80u8, + 184u8, + 177u8, + 81u8, + ]; + pub fn match_log(log: &substreams_ethereum::pb::eth::v2::Log) -> bool { + if log.topics.len() != 3usize { + return false; + } + if log.data.len() != 64usize { + return false; + } + return log.topics.get(0).expect("bounds already checked").as_ref() + == Self::TOPIC_ID; + } + pub fn decode( + log: &substreams_ethereum::pb::eth::v2::Log, + ) -> Result { + let mut values = ethabi::decode( + &[ + ethabi::ParamType::Uint(128usize), + ethabi::ParamType::Uint(128usize), + ], + log.data.as_ref(), + ) + .map_err(|e| format!("unable to decode log.data: {:?}", e))?; + values.reverse(); + Ok(Self { + sender: ethabi::decode( + &[ethabi::ParamType::Address], + log.topics[1usize].as_ref(), + ) + .map_err(|e| format!( + "unable to decode param 'sender' from topic of type 'address': {:?}", + e + ))? + .pop() + .expect(INTERNAL_ERR) + .into_address() + .expect(INTERNAL_ERR) + .as_bytes() + .to_vec(), + recipient: ethabi::decode( + &[ethabi::ParamType::Address], + log.topics[2usize].as_ref(), + ) + .map_err(|e| format!( + "unable to decode param 'recipient' from topic of type 'address': {:?}", + e + ))? 
+ .pop() + .expect(INTERNAL_ERR) + .into_address() + .expect(INTERNAL_ERR) + .as_bytes() + .to_vec(), + amount0: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + amount1: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + }) + } + } + impl substreams_ethereum::Event for CollectProtocol { + const NAME: &'static str = "CollectProtocol"; + fn match_log(log: &substreams_ethereum::pb::eth::v2::Log) -> bool { + Self::match_log(log) + } + fn decode( + log: &substreams_ethereum::pb::eth::v2::Log, + ) -> Result { + Self::decode(log) + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct Flash { + pub sender: Vec, + pub recipient: Vec, + pub amount0: substreams::scalar::BigInt, + pub amount1: substreams::scalar::BigInt, + pub paid0: substreams::scalar::BigInt, + pub paid1: substreams::scalar::BigInt, + } + impl Flash { + const TOPIC_ID: [u8; 32] = [ + 189u8, + 189u8, + 183u8, + 29u8, + 120u8, + 96u8, + 55u8, + 107u8, + 165u8, + 43u8, + 37u8, + 165u8, + 2u8, + 139u8, + 238u8, + 162u8, + 53u8, + 129u8, + 54u8, + 74u8, + 64u8, + 82u8, + 47u8, + 107u8, + 207u8, + 184u8, + 107u8, + 177u8, + 242u8, + 220u8, + 166u8, + 51u8, + ]; + pub fn match_log(log: &substreams_ethereum::pb::eth::v2::Log) -> bool { + if log.topics.len() != 3usize { + return false; + } + if log.data.len() != 128usize { + return false; + } + return log.topics.get(0).expect("bounds already checked").as_ref() + == Self::TOPIC_ID; + } + pub fn decode( + log: &substreams_ethereum::pb::eth::v2::Log, + ) -> Result { + let mut values = ethabi::decode( + &[ + ethabi::ParamType::Uint(256usize), + ethabi::ParamType::Uint(256usize), + ethabi::ParamType::Uint(256usize), + 
ethabi::ParamType::Uint(256usize), + ], + log.data.as_ref(), + ) + .map_err(|e| format!("unable to decode log.data: {:?}", e))?; + values.reverse(); + Ok(Self { + sender: ethabi::decode( + &[ethabi::ParamType::Address], + log.topics[1usize].as_ref(), + ) + .map_err(|e| format!( + "unable to decode param 'sender' from topic of type 'address': {:?}", + e + ))? + .pop() + .expect(INTERNAL_ERR) + .into_address() + .expect(INTERNAL_ERR) + .as_bytes() + .to_vec(), + recipient: ethabi::decode( + &[ethabi::ParamType::Address], + log.topics[2usize].as_ref(), + ) + .map_err(|e| format!( + "unable to decode param 'recipient' from topic of type 'address': {:?}", + e + ))? + .pop() + .expect(INTERNAL_ERR) + .into_address() + .expect(INTERNAL_ERR) + .as_bytes() + .to_vec(), + amount0: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + amount1: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + paid0: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + paid1: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + }) + } + } + impl substreams_ethereum::Event for Flash { + const NAME: &'static str = "Flash"; + fn match_log(log: &substreams_ethereum::pb::eth::v2::Log) -> bool { + Self::match_log(log) + } + fn decode( + log: &substreams_ethereum::pb::eth::v2::Log, + ) -> Result { + Self::decode(log) + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct 
IncreaseObservationCardinalityNext { + pub observation_cardinality_next_old: substreams::scalar::BigInt, + pub observation_cardinality_next_new: substreams::scalar::BigInt, + } + impl IncreaseObservationCardinalityNext { + const TOPIC_ID: [u8; 32] = [ + 172u8, + 73u8, + 229u8, + 24u8, + 249u8, + 10u8, + 53u8, + 143u8, + 101u8, + 46u8, + 68u8, + 0u8, + 22u8, + 79u8, + 5u8, + 165u8, + 216u8, + 247u8, + 227u8, + 94u8, + 119u8, + 71u8, + 39u8, + 155u8, + 195u8, + 169u8, + 61u8, + 191u8, + 88u8, + 78u8, + 18u8, + 90u8, + ]; + pub fn match_log(log: &substreams_ethereum::pb::eth::v2::Log) -> bool { + if log.topics.len() != 1usize { + return false; + } + if log.data.len() != 64usize { + return false; + } + return log.topics.get(0).expect("bounds already checked").as_ref() + == Self::TOPIC_ID; + } + pub fn decode( + log: &substreams_ethereum::pb::eth::v2::Log, + ) -> Result { + let mut values = ethabi::decode( + &[ + ethabi::ParamType::Uint(16usize), + ethabi::ParamType::Uint(16usize), + ], + log.data.as_ref(), + ) + .map_err(|e| format!("unable to decode log.data: {:?}", e))?; + values.reverse(); + Ok(Self { + observation_cardinality_next_old: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + observation_cardinality_next_new: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + }) + } + } + impl substreams_ethereum::Event for IncreaseObservationCardinalityNext { + const NAME: &'static str = "IncreaseObservationCardinalityNext"; + fn match_log(log: &substreams_ethereum::pb::eth::v2::Log) -> bool { + Self::match_log(log) + } + fn decode( + log: &substreams_ethereum::pb::eth::v2::Log, + ) -> Result { + Self::decode(log) + } + } + #[derive(Debug, Clone, 
PartialEq)] + pub struct Initialize { + pub sqrt_price_x96: substreams::scalar::BigInt, + pub tick: substreams::scalar::BigInt, + } + impl Initialize { + const TOPIC_ID: [u8; 32] = [ + 152u8, + 99u8, + 96u8, + 54u8, + 203u8, + 102u8, + 169u8, + 193u8, + 154u8, + 55u8, + 67u8, + 94u8, + 252u8, + 30u8, + 144u8, + 20u8, + 33u8, + 144u8, + 33u8, + 78u8, + 138u8, + 190u8, + 184u8, + 33u8, + 189u8, + 186u8, + 63u8, + 41u8, + 144u8, + 221u8, + 76u8, + 149u8, + ]; + pub fn match_log(log: &substreams_ethereum::pb::eth::v2::Log) -> bool { + if log.topics.len() != 1usize { + return false; + } + if log.data.len() != 64usize { + return false; + } + return log.topics.get(0).expect("bounds already checked").as_ref() + == Self::TOPIC_ID; + } + pub fn decode( + log: &substreams_ethereum::pb::eth::v2::Log, + ) -> Result { + let mut values = ethabi::decode( + &[ + ethabi::ParamType::Uint(160usize), + ethabi::ParamType::Int(24usize), + ], + log.data.as_ref(), + ) + .map_err(|e| format!("unable to decode log.data: {:?}", e))?; + values.reverse(); + Ok(Self { + sqrt_price_x96: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + tick: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_int() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_signed_bytes_be(&v) + }, + }) + } + } + impl substreams_ethereum::Event for Initialize { + const NAME: &'static str = "Initialize"; + fn match_log(log: &substreams_ethereum::pb::eth::v2::Log) -> bool { + Self::match_log(log) + } + fn decode( + log: &substreams_ethereum::pb::eth::v2::Log, + ) -> Result { + Self::decode(log) + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct Mint { + pub sender: Vec, + pub owner: Vec, + pub tick_lower: substreams::scalar::BigInt, + pub tick_upper: substreams::scalar::BigInt, + pub 
amount: substreams::scalar::BigInt, + pub amount0: substreams::scalar::BigInt, + pub amount1: substreams::scalar::BigInt, + } + impl Mint { + const TOPIC_ID: [u8; 32] = [ + 122u8, + 83u8, + 8u8, + 11u8, + 164u8, + 20u8, + 21u8, + 139u8, + 231u8, + 236u8, + 105u8, + 185u8, + 135u8, + 181u8, + 251u8, + 125u8, + 7u8, + 222u8, + 225u8, + 1u8, + 254u8, + 133u8, + 72u8, + 143u8, + 8u8, + 83u8, + 174u8, + 22u8, + 35u8, + 157u8, + 11u8, + 222u8, + ]; + pub fn match_log(log: &substreams_ethereum::pb::eth::v2::Log) -> bool { + if log.topics.len() != 4usize { + return false; + } + if log.data.len() != 128usize { + return false; + } + return log.topics.get(0).expect("bounds already checked").as_ref() + == Self::TOPIC_ID; + } + pub fn decode( + log: &substreams_ethereum::pb::eth::v2::Log, + ) -> Result { + let mut values = ethabi::decode( + &[ + ethabi::ParamType::Address, + ethabi::ParamType::Uint(128usize), + ethabi::ParamType::Uint(256usize), + ethabi::ParamType::Uint(256usize), + ], + log.data.as_ref(), + ) + .map_err(|e| format!("unable to decode log.data: {:?}", e))?; + values.reverse(); + Ok(Self { + owner: ethabi::decode( + &[ethabi::ParamType::Address], + log.topics[1usize].as_ref(), + ) + .map_err(|e| format!( + "unable to decode param 'owner' from topic of type 'address': {:?}", + e + ))? 
+ .pop() + .expect(INTERNAL_ERR) + .into_address() + .expect(INTERNAL_ERR) + .as_bytes() + .to_vec(), + tick_lower: substreams::scalar::BigInt::from_signed_bytes_be( + log.topics[2usize].as_ref(), + ), + tick_upper: substreams::scalar::BigInt::from_signed_bytes_be( + log.topics[3usize].as_ref(), + ), + sender: values + .pop() + .expect(INTERNAL_ERR) + .into_address() + .expect(INTERNAL_ERR) + .as_bytes() + .to_vec(), + amount: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + amount0: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + amount1: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + }) + } + } + impl substreams_ethereum::Event for Mint { + const NAME: &'static str = "Mint"; + fn match_log(log: &substreams_ethereum::pb::eth::v2::Log) -> bool { + Self::match_log(log) + } + fn decode( + log: &substreams_ethereum::pb::eth::v2::Log, + ) -> Result { + Self::decode(log) + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct SetFeeProtocol { + pub fee_protocol0_old: substreams::scalar::BigInt, + pub fee_protocol1_old: substreams::scalar::BigInt, + pub fee_protocol0_new: substreams::scalar::BigInt, + pub fee_protocol1_new: substreams::scalar::BigInt, + } + impl SetFeeProtocol { + const TOPIC_ID: [u8; 32] = [ + 151u8, + 61u8, + 141u8, + 146u8, + 187u8, + 41u8, + 159u8, + 74u8, + 246u8, + 206u8, + 73u8, + 181u8, + 42u8, + 138u8, + 219u8, + 133u8, + 174u8, + 70u8, + 185u8, + 242u8, + 20u8, + 196u8, + 196u8, + 252u8, + 6u8, + 172u8, + 119u8, + 64u8, + 18u8, + 55u8, + 177u8, + 51u8, + ]; + 
pub fn match_log(log: &substreams_ethereum::pb::eth::v2::Log) -> bool { + if log.topics.len() != 1usize { + return false; + } + if log.data.len() != 128usize { + return false; + } + return log.topics.get(0).expect("bounds already checked").as_ref() + == Self::TOPIC_ID; + } + pub fn decode( + log: &substreams_ethereum::pb::eth::v2::Log, + ) -> Result { + let mut values = ethabi::decode( + &[ + ethabi::ParamType::Uint(8usize), + ethabi::ParamType::Uint(8usize), + ethabi::ParamType::Uint(8usize), + ethabi::ParamType::Uint(8usize), + ], + log.data.as_ref(), + ) + .map_err(|e| format!("unable to decode log.data: {:?}", e))?; + values.reverse(); + Ok(Self { + fee_protocol0_old: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + fee_protocol1_old: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + fee_protocol0_new: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + fee_protocol1_new: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + }) + } + } + impl substreams_ethereum::Event for SetFeeProtocol { + const NAME: &'static str = "SetFeeProtocol"; + fn match_log(log: &substreams_ethereum::pb::eth::v2::Log) -> bool { + Self::match_log(log) + } + fn decode( + log: &substreams_ethereum::pb::eth::v2::Log, + ) -> Result { + Self::decode(log) + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct Swap { + pub sender: Vec, + pub recipient: Vec, + 
pub amount0: substreams::scalar::BigInt, + pub amount1: substreams::scalar::BigInt, + pub sqrt_price_x96: substreams::scalar::BigInt, + pub liquidity: substreams::scalar::BigInt, + pub tick: substreams::scalar::BigInt, + } + impl Swap { + const TOPIC_ID: [u8; 32] = [ + 196u8, + 32u8, + 121u8, + 249u8, + 74u8, + 99u8, + 80u8, + 215u8, + 230u8, + 35u8, + 95u8, + 41u8, + 23u8, + 73u8, + 36u8, + 249u8, + 40u8, + 204u8, + 42u8, + 200u8, + 24u8, + 235u8, + 100u8, + 254u8, + 216u8, + 0u8, + 78u8, + 17u8, + 95u8, + 188u8, + 202u8, + 103u8, + ]; + pub fn match_log(log: &substreams_ethereum::pb::eth::v2::Log) -> bool { + if log.topics.len() != 3usize { + return false; + } + if log.data.len() != 160usize { + return false; + } + return log.topics.get(0).expect("bounds already checked").as_ref() + == Self::TOPIC_ID; + } + pub fn decode( + log: &substreams_ethereum::pb::eth::v2::Log, + ) -> Result { + let mut values = ethabi::decode( + &[ + ethabi::ParamType::Int(256usize), + ethabi::ParamType::Int(256usize), + ethabi::ParamType::Uint(160usize), + ethabi::ParamType::Uint(128usize), + ethabi::ParamType::Int(24usize), + ], + log.data.as_ref(), + ) + .map_err(|e| format!("unable to decode log.data: {:?}", e))?; + values.reverse(); + Ok(Self { + sender: ethabi::decode( + &[ethabi::ParamType::Address], + log.topics[1usize].as_ref(), + ) + .map_err(|e| format!( + "unable to decode param 'sender' from topic of type 'address': {:?}", + e + ))? + .pop() + .expect(INTERNAL_ERR) + .into_address() + .expect(INTERNAL_ERR) + .as_bytes() + .to_vec(), + recipient: ethabi::decode( + &[ethabi::ParamType::Address], + log.topics[2usize].as_ref(), + ) + .map_err(|e| format!( + "unable to decode param 'recipient' from topic of type 'address': {:?}", + e + ))? 
+ .pop() + .expect(INTERNAL_ERR) + .into_address() + .expect(INTERNAL_ERR) + .as_bytes() + .to_vec(), + amount0: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_int() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_signed_bytes_be(&v) + }, + amount1: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_int() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_signed_bytes_be(&v) + }, + sqrt_price_x96: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + liquidity: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + tick: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_int() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_signed_bytes_be(&v) + }, + }) + } + } + impl substreams_ethereum::Event for Swap { + const NAME: &'static str = "Swap"; + fn match_log(log: &substreams_ethereum::pb::eth::v2::Log) -> bool { + Self::match_log(log) + } + fn decode( + log: &substreams_ethereum::pb::eth::v2::Log, + ) -> Result { + Self::decode(log) + } + } + } \ No newline at end of file diff --git a/transforms/uniswap/src/abi/positionmanager.rs b/transforms/uniswap/src/abi/positionmanager.rs new file mode 100644 index 00000000000..a9c7e844747 --- /dev/null +++ b/transforms/uniswap/src/abi/positionmanager.rs @@ -0,0 +1,5863 @@ + const INTERNAL_ERR: &'static str = "`ethabi_derive` internal error"; + /// Contract's functions. 
+ #[allow(dead_code, unused_imports, unused_variables)] + pub mod functions { + use super::INTERNAL_ERR; + #[derive(Debug, Clone, PartialEq)] + pub struct DomainSeparator {} + impl DomainSeparator { + const METHOD_ID: [u8; 4] = [54u8, 68u8, 229u8, 21u8]; + pub fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Ok(Self {}) + } + pub fn encode(&self) -> Vec { + let data = ethabi::encode(&[]); + let mut encoded = Vec::with_capacity(4 + data.len()); + encoded.extend(Self::METHOD_ID); + encoded.extend(data); + encoded + } + pub fn output_call( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result<[u8; 32usize], String> { + Self::output(call.return_data.as_ref()) + } + pub fn output(data: &[u8]) -> Result<[u8; 32usize], String> { + let mut values = ethabi::decode( + &[ethabi::ParamType::FixedBytes(32usize)], + data.as_ref(), + ) + .map_err(|e| format!("unable to decode output data: {:?}", e))?; + Ok({ + let mut result = [0u8; 32]; + let v = values + .pop() + .expect("one output data should have existed") + .into_fixed_bytes() + .expect(INTERNAL_ERR); + result.copy_from_slice(&v); + result + }) + } + pub fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + match call.input.get(0..4) { + Some(signature) => Self::METHOD_ID == signature, + None => false, + } + } + pub fn call(&self, address: Vec) -> Option<[u8; 32usize]> { + use substreams_ethereum::pb::eth::rpc; + let rpc_calls = rpc::RpcCalls { + calls: vec![ + rpc::RpcCall { to_addr : address, data : self.encode(), } + ], + }; + let responses = substreams_ethereum::rpc::eth_call(&rpc_calls).responses; + let response = responses + .get(0) + .expect("one response should have existed"); + if response.failed { + return None; + } + match Self::output(response.raw.as_ref()) { + Ok(data) => Some(data), + Err(err) => { + use substreams_ethereum::Function; + substreams::log::info!( + "Call output for function `{}` failed to decode with error: {}", + Self::NAME, err + ); + 
None + } + } + } + } + impl substreams_ethereum::Function for DomainSeparator { + const NAME: &'static str = "DOMAIN_SEPARATOR"; + fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + Self::match_call(call) + } + fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::decode(call) + } + fn encode(&self) -> Vec { + self.encode() + } + } + impl substreams_ethereum::rpc::RPCDecodable<[u8; 32usize]> for DomainSeparator { + fn output(data: &[u8]) -> Result<[u8; 32usize], String> { + Self::output(data) + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct PermitTypehash {} + impl PermitTypehash { + const METHOD_ID: [u8; 4] = [48u8, 173u8, 248u8, 31u8]; + pub fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Ok(Self {}) + } + pub fn encode(&self) -> Vec { + let data = ethabi::encode(&[]); + let mut encoded = Vec::with_capacity(4 + data.len()); + encoded.extend(Self::METHOD_ID); + encoded.extend(data); + encoded + } + pub fn output_call( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result<[u8; 32usize], String> { + Self::output(call.return_data.as_ref()) + } + pub fn output(data: &[u8]) -> Result<[u8; 32usize], String> { + let mut values = ethabi::decode( + &[ethabi::ParamType::FixedBytes(32usize)], + data.as_ref(), + ) + .map_err(|e| format!("unable to decode output data: {:?}", e))?; + Ok({ + let mut result = [0u8; 32]; + let v = values + .pop() + .expect("one output data should have existed") + .into_fixed_bytes() + .expect(INTERNAL_ERR); + result.copy_from_slice(&v); + result + }) + } + pub fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + match call.input.get(0..4) { + Some(signature) => Self::METHOD_ID == signature, + None => false, + } + } + pub fn call(&self, address: Vec) -> Option<[u8; 32usize]> { + use substreams_ethereum::pb::eth::rpc; + let rpc_calls = rpc::RpcCalls { + calls: vec![ + rpc::RpcCall { to_addr : address, data : self.encode(), } + ], + 
}; + let responses = substreams_ethereum::rpc::eth_call(&rpc_calls).responses; + let response = responses + .get(0) + .expect("one response should have existed"); + if response.failed { + return None; + } + match Self::output(response.raw.as_ref()) { + Ok(data) => Some(data), + Err(err) => { + use substreams_ethereum::Function; + substreams::log::info!( + "Call output for function `{}` failed to decode with error: {}", + Self::NAME, err + ); + None + } + } + } + } + impl substreams_ethereum::Function for PermitTypehash { + const NAME: &'static str = "PERMIT_TYPEHASH"; + fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + Self::match_call(call) + } + fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::decode(call) + } + fn encode(&self) -> Vec { + self.encode() + } + } + impl substreams_ethereum::rpc::RPCDecodable<[u8; 32usize]> for PermitTypehash { + fn output(data: &[u8]) -> Result<[u8; 32usize], String> { + Self::output(data) + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct Weth9 {} + impl Weth9 { + const METHOD_ID: [u8; 4] = [74u8, 164u8, 164u8, 252u8]; + pub fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Ok(Self {}) + } + pub fn encode(&self) -> Vec { + let data = ethabi::encode(&[]); + let mut encoded = Vec::with_capacity(4 + data.len()); + encoded.extend(Self::METHOD_ID); + encoded.extend(data); + encoded + } + pub fn output_call( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result, String> { + Self::output(call.return_data.as_ref()) + } + pub fn output(data: &[u8]) -> Result, String> { + let mut values = ethabi::decode( + &[ethabi::ParamType::Address], + data.as_ref(), + ) + .map_err(|e| format!("unable to decode output data: {:?}", e))?; + Ok( + values + .pop() + .expect("one output data should have existed") + .into_address() + .expect(INTERNAL_ERR) + .as_bytes() + .to_vec(), + ) + } + pub fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> 
bool { + match call.input.get(0..4) { + Some(signature) => Self::METHOD_ID == signature, + None => false, + } + } + pub fn call(&self, address: Vec) -> Option> { + use substreams_ethereum::pb::eth::rpc; + let rpc_calls = rpc::RpcCalls { + calls: vec![ + rpc::RpcCall { to_addr : address, data : self.encode(), } + ], + }; + let responses = substreams_ethereum::rpc::eth_call(&rpc_calls).responses; + let response = responses + .get(0) + .expect("one response should have existed"); + if response.failed { + return None; + } + match Self::output(response.raw.as_ref()) { + Ok(data) => Some(data), + Err(err) => { + use substreams_ethereum::Function; + substreams::log::info!( + "Call output for function `{}` failed to decode with error: {}", + Self::NAME, err + ); + None + } + } + } + } + impl substreams_ethereum::Function for Weth9 { + const NAME: &'static str = "WETH9"; + fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + Self::match_call(call) + } + fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::decode(call) + } + fn encode(&self) -> Vec { + self.encode() + } + } + impl substreams_ethereum::rpc::RPCDecodable> for Weth9 { + fn output(data: &[u8]) -> Result, String> { + Self::output(data) + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct Approve { + pub to: Vec, + pub token_id: substreams::scalar::BigInt, + } + impl Approve { + const METHOD_ID: [u8; 4] = [9u8, 94u8, 167u8, 179u8]; + pub fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + let maybe_data = call.input.get(4..); + if maybe_data.is_none() { + return Err("no data to decode".to_string()); + } + let mut values = ethabi::decode( + &[ethabi::ParamType::Address, ethabi::ParamType::Uint(256usize)], + maybe_data.unwrap(), + ) + .map_err(|e| format!("unable to decode call.input: {:?}", e))?; + values.reverse(); + Ok(Self { + to: values + .pop() + .expect(INTERNAL_ERR) + .into_address() + .expect(INTERNAL_ERR) + .as_bytes() + 
.to_vec(), + token_id: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + }) + } + pub fn encode(&self) -> Vec { + let data = ethabi::encode( + &[ + ethabi::Token::Address(ethabi::Address::from_slice(&self.to)), + ethabi::Token::Uint( + ethabi::Uint::from_big_endian( + match self.token_id.clone().to_bytes_be() { + (num_bigint::Sign::Plus, bytes) => bytes, + (num_bigint::Sign::NoSign, bytes) => bytes, + (num_bigint::Sign::Minus, _) => { + panic!("negative numbers are not supported") + } + } + .as_slice(), + ), + ), + ], + ); + let mut encoded = Vec::with_capacity(4 + data.len()); + encoded.extend(Self::METHOD_ID); + encoded.extend(data); + encoded + } + pub fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + match call.input.get(0..4) { + Some(signature) => Self::METHOD_ID == signature, + None => false, + } + } + } + impl substreams_ethereum::Function for Approve { + const NAME: &'static str = "approve"; + fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + Self::match_call(call) + } + fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::decode(call) + } + fn encode(&self) -> Vec { + self.encode() + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct BalanceOf { + pub owner: Vec, + } + impl BalanceOf { + const METHOD_ID: [u8; 4] = [112u8, 160u8, 130u8, 49u8]; + pub fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + let maybe_data = call.input.get(4..); + if maybe_data.is_none() { + return Err("no data to decode".to_string()); + } + let mut values = ethabi::decode( + &[ethabi::ParamType::Address], + maybe_data.unwrap(), + ) + .map_err(|e| format!("unable to decode call.input: {:?}", e))?; + values.reverse(); + Ok(Self { + owner: values + .pop() + .expect(INTERNAL_ERR) + .into_address() + 
.expect(INTERNAL_ERR) + .as_bytes() + .to_vec(), + }) + } + pub fn encode(&self) -> Vec { + let data = ethabi::encode( + &[ethabi::Token::Address(ethabi::Address::from_slice(&self.owner))], + ); + let mut encoded = Vec::with_capacity(4 + data.len()); + encoded.extend(Self::METHOD_ID); + encoded.extend(data); + encoded + } + pub fn output_call( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::output(call.return_data.as_ref()) + } + pub fn output(data: &[u8]) -> Result { + let mut values = ethabi::decode( + &[ethabi::ParamType::Uint(256usize)], + data.as_ref(), + ) + .map_err(|e| format!("unable to decode output data: {:?}", e))?; + Ok({ + let mut v = [0 as u8; 32]; + values + .pop() + .expect("one output data should have existed") + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }) + } + pub fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + match call.input.get(0..4) { + Some(signature) => Self::METHOD_ID == signature, + None => false, + } + } + pub fn call(&self, address: Vec) -> Option { + use substreams_ethereum::pb::eth::rpc; + let rpc_calls = rpc::RpcCalls { + calls: vec![ + rpc::RpcCall { to_addr : address, data : self.encode(), } + ], + }; + let responses = substreams_ethereum::rpc::eth_call(&rpc_calls).responses; + let response = responses + .get(0) + .expect("one response should have existed"); + if response.failed { + return None; + } + match Self::output(response.raw.as_ref()) { + Ok(data) => Some(data), + Err(err) => { + use substreams_ethereum::Function; + substreams::log::info!( + "Call output for function `{}` failed to decode with error: {}", + Self::NAME, err + ); + None + } + } + } + } + impl substreams_ethereum::Function for BalanceOf { + const NAME: &'static str = "balanceOf"; + fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + Self::match_call(call) + } + fn decode( + call: 
&substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::decode(call) + } + fn encode(&self) -> Vec { + self.encode() + } + } + impl substreams_ethereum::rpc::RPCDecodable + for BalanceOf { + fn output(data: &[u8]) -> Result { + Self::output(data) + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct BaseUri {} + impl BaseUri { + const METHOD_ID: [u8; 4] = [108u8, 3u8, 96u8, 235u8]; + pub fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Ok(Self {}) + } + pub fn encode(&self) -> Vec { + let data = ethabi::encode(&[]); + let mut encoded = Vec::with_capacity(4 + data.len()); + encoded.extend(Self::METHOD_ID); + encoded.extend(data); + encoded + } + pub fn output_call( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::output(call.return_data.as_ref()) + } + pub fn output(data: &[u8]) -> Result { + let mut values = ethabi::decode( + &[ethabi::ParamType::String], + data.as_ref(), + ) + .map_err(|e| format!("unable to decode output data: {:?}", e))?; + Ok( + values + .pop() + .expect("one output data should have existed") + .into_string() + .expect(INTERNAL_ERR), + ) + } + pub fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + match call.input.get(0..4) { + Some(signature) => Self::METHOD_ID == signature, + None => false, + } + } + pub fn call(&self, address: Vec) -> Option { + use substreams_ethereum::pb::eth::rpc; + let rpc_calls = rpc::RpcCalls { + calls: vec![ + rpc::RpcCall { to_addr : address, data : self.encode(), } + ], + }; + let responses = substreams_ethereum::rpc::eth_call(&rpc_calls).responses; + let response = responses + .get(0) + .expect("one response should have existed"); + if response.failed { + return None; + } + match Self::output(response.raw.as_ref()) { + Ok(data) => Some(data), + Err(err) => { + use substreams_ethereum::Function; + substreams::log::info!( + "Call output for function `{}` failed to decode with error: {}", + Self::NAME, err + ); + None + } + } + } + 
} + impl substreams_ethereum::Function for BaseUri { + const NAME: &'static str = "baseURI"; + fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + Self::match_call(call) + } + fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::decode(call) + } + fn encode(&self) -> Vec { + self.encode() + } + } + impl substreams_ethereum::rpc::RPCDecodable for BaseUri { + fn output(data: &[u8]) -> Result { + Self::output(data) + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct Burn { + pub token_id: substreams::scalar::BigInt, + } + impl Burn { + const METHOD_ID: [u8; 4] = [66u8, 150u8, 108u8, 104u8]; + pub fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + let maybe_data = call.input.get(4..); + if maybe_data.is_none() { + return Err("no data to decode".to_string()); + } + let mut values = ethabi::decode( + &[ethabi::ParamType::Uint(256usize)], + maybe_data.unwrap(), + ) + .map_err(|e| format!("unable to decode call.input: {:?}", e))?; + values.reverse(); + Ok(Self { + token_id: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + }) + } + pub fn encode(&self) -> Vec { + let data = ethabi::encode( + &[ + ethabi::Token::Uint( + ethabi::Uint::from_big_endian( + match self.token_id.clone().to_bytes_be() { + (num_bigint::Sign::Plus, bytes) => bytes, + (num_bigint::Sign::NoSign, bytes) => bytes, + (num_bigint::Sign::Minus, _) => { + panic!("negative numbers are not supported") + } + } + .as_slice(), + ), + ), + ], + ); + let mut encoded = Vec::with_capacity(4 + data.len()); + encoded.extend(Self::METHOD_ID); + encoded.extend(data); + encoded + } + pub fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + match call.input.get(0..4) { + Some(signature) => Self::METHOD_ID == signature, + None => false, + } + } + } + impl 
substreams_ethereum::Function for Burn { + const NAME: &'static str = "burn"; + fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + Self::match_call(call) + } + fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::decode(call) + } + fn encode(&self) -> Vec { + self.encode() + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct Collect { + pub token_id: substreams::scalar::BigInt, + pub recipient: Vec, + pub amount0_max: substreams::scalar::BigInt, + pub amount1_max: substreams::scalar::BigInt, + } + impl Collect { + const METHOD_ID: [u8; 4] = [38u8, 14u8, 18u8, 176u8]; + pub fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + let maybe_data = call.input.get(4..); + if maybe_data.is_none() { + return Err("no data to decode".to_string()); + } + let mut values = ethabi::decode( + &[ + ethabi::ParamType::Uint(256usize), + ethabi::ParamType::Address, + ethabi::ParamType::Uint(128usize), + ethabi::ParamType::Uint(128usize), + ], + maybe_data.unwrap(), + ) + .map_err(|e| format!("unable to decode call.input: {:?}", e))?; + values.reverse(); + Ok(Self { + token_id: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + recipient: values + .pop() + .expect(INTERNAL_ERR) + .into_address() + .expect(INTERNAL_ERR) + .as_bytes() + .to_vec(), + amount0_max: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + amount1_max: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + }) + } + pub fn encode(&self) -> Vec { + let data = 
ethabi::encode( + &[ + ethabi::Token::Uint( + ethabi::Uint::from_big_endian( + match self.token_id.clone().to_bytes_be() { + (num_bigint::Sign::Plus, bytes) => bytes, + (num_bigint::Sign::NoSign, bytes) => bytes, + (num_bigint::Sign::Minus, _) => { + panic!("negative numbers are not supported") + } + } + .as_slice(), + ), + ), + ethabi::Token::Address( + ethabi::Address::from_slice(&self.recipient), + ), + ethabi::Token::Uint( + ethabi::Uint::from_big_endian( + match self.amount0_max.clone().to_bytes_be() { + (num_bigint::Sign::Plus, bytes) => bytes, + (num_bigint::Sign::NoSign, bytes) => bytes, + (num_bigint::Sign::Minus, _) => { + panic!("negative numbers are not supported") + } + } + .as_slice(), + ), + ), + ethabi::Token::Uint( + ethabi::Uint::from_big_endian( + match self.amount1_max.clone().to_bytes_be() { + (num_bigint::Sign::Plus, bytes) => bytes, + (num_bigint::Sign::NoSign, bytes) => bytes, + (num_bigint::Sign::Minus, _) => { + panic!("negative numbers are not supported") + } + } + .as_slice(), + ), + ), + ], + ); + let mut encoded = Vec::with_capacity(4 + data.len()); + encoded.extend(Self::METHOD_ID); + encoded.extend(data); + encoded + } + pub fn output_call( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result< + (substreams::scalar::BigInt, substreams::scalar::BigInt), + String, + > { + Self::output(call.return_data.as_ref()) + } + pub fn output( + data: &[u8], + ) -> Result< + (substreams::scalar::BigInt, substreams::scalar::BigInt), + String, + > { + let mut values = ethabi::decode( + &[ + ethabi::ParamType::Uint(256usize), + ethabi::ParamType::Uint(256usize), + ], + data.as_ref(), + ) + .map_err(|e| format!("unable to decode output data: {:?}", e))?; + values.reverse(); + Ok(( + { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + { + let mut v = [0 as u8; 32]; + values + .pop() + 
.expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + )) + } + pub fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + match call.input.get(0..4) { + Some(signature) => Self::METHOD_ID == signature, + None => false, + } + } + pub fn call( + &self, + address: Vec, + ) -> Option<(substreams::scalar::BigInt, substreams::scalar::BigInt)> { + use substreams_ethereum::pb::eth::rpc; + let rpc_calls = rpc::RpcCalls { + calls: vec![ + rpc::RpcCall { to_addr : address, data : self.encode(), } + ], + }; + let responses = substreams_ethereum::rpc::eth_call(&rpc_calls).responses; + let response = responses + .get(0) + .expect("one response should have existed"); + if response.failed { + return None; + } + match Self::output(response.raw.as_ref()) { + Ok(data) => Some(data), + Err(err) => { + use substreams_ethereum::Function; + substreams::log::info!( + "Call output for function `{}` failed to decode with error: {}", + Self::NAME, err + ); + None + } + } + } + } + impl substreams_ethereum::Function for Collect { + const NAME: &'static str = "collect"; + fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + Self::match_call(call) + } + fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::decode(call) + } + fn encode(&self) -> Vec { + self.encode() + } + } + impl substreams_ethereum::rpc::RPCDecodable< + (substreams::scalar::BigInt, substreams::scalar::BigInt), + > for Collect { + fn output( + data: &[u8], + ) -> Result< + (substreams::scalar::BigInt, substreams::scalar::BigInt), + String, + > { + Self::output(data) + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct CreateAndInitializePoolIfNecessary { + pub token_a: Vec, + pub token_b: Vec, + pub fee: substreams::scalar::BigInt, + pub sqrt_price_x96: substreams::scalar::BigInt, + } + impl CreateAndInitializePoolIfNecessary { + const METHOD_ID: 
[u8; 4] = [19u8, 234u8, 213u8, 98u8]; + pub fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + let maybe_data = call.input.get(4..); + if maybe_data.is_none() { + return Err("no data to decode".to_string()); + } + let mut values = ethabi::decode( + &[ + ethabi::ParamType::Address, + ethabi::ParamType::Address, + ethabi::ParamType::Uint(24usize), + ethabi::ParamType::Uint(160usize), + ], + maybe_data.unwrap(), + ) + .map_err(|e| format!("unable to decode call.input: {:?}", e))?; + values.reverse(); + Ok(Self { + token_a: values + .pop() + .expect(INTERNAL_ERR) + .into_address() + .expect(INTERNAL_ERR) + .as_bytes() + .to_vec(), + token_b: values + .pop() + .expect(INTERNAL_ERR) + .into_address() + .expect(INTERNAL_ERR) + .as_bytes() + .to_vec(), + fee: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + sqrt_price_x96: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + }) + } + pub fn encode(&self) -> Vec { + let data = ethabi::encode( + &[ + ethabi::Token::Address( + ethabi::Address::from_slice(&self.token_a), + ), + ethabi::Token::Address( + ethabi::Address::from_slice(&self.token_b), + ), + ethabi::Token::Uint( + ethabi::Uint::from_big_endian( + match self.fee.clone().to_bytes_be() { + (num_bigint::Sign::Plus, bytes) => bytes, + (num_bigint::Sign::NoSign, bytes) => bytes, + (num_bigint::Sign::Minus, _) => { + panic!("negative numbers are not supported") + } + } + .as_slice(), + ), + ), + ethabi::Token::Uint( + ethabi::Uint::from_big_endian( + match self.sqrt_price_x96.clone().to_bytes_be() { + (num_bigint::Sign::Plus, bytes) => bytes, + (num_bigint::Sign::NoSign, bytes) => bytes, + (num_bigint::Sign::Minus, _) => { + 
panic!("negative numbers are not supported") + } + } + .as_slice(), + ), + ), + ], + ); + let mut encoded = Vec::with_capacity(4 + data.len()); + encoded.extend(Self::METHOD_ID); + encoded.extend(data); + encoded + } + pub fn output_call( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result, String> { + Self::output(call.return_data.as_ref()) + } + pub fn output(data: &[u8]) -> Result, String> { + let mut values = ethabi::decode( + &[ethabi::ParamType::Address], + data.as_ref(), + ) + .map_err(|e| format!("unable to decode output data: {:?}", e))?; + Ok( + values + .pop() + .expect("one output data should have existed") + .into_address() + .expect(INTERNAL_ERR) + .as_bytes() + .to_vec(), + ) + } + pub fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + match call.input.get(0..4) { + Some(signature) => Self::METHOD_ID == signature, + None => false, + } + } + pub fn call(&self, address: Vec) -> Option> { + use substreams_ethereum::pb::eth::rpc; + let rpc_calls = rpc::RpcCalls { + calls: vec![ + rpc::RpcCall { to_addr : address, data : self.encode(), } + ], + }; + let responses = substreams_ethereum::rpc::eth_call(&rpc_calls).responses; + let response = responses + .get(0) + .expect("one response should have existed"); + if response.failed { + return None; + } + match Self::output(response.raw.as_ref()) { + Ok(data) => Some(data), + Err(err) => { + use substreams_ethereum::Function; + substreams::log::info!( + "Call output for function `{}` failed to decode with error: {}", + Self::NAME, err + ); + None + } + } + } + } + impl substreams_ethereum::Function for CreateAndInitializePoolIfNecessary { + const NAME: &'static str = "createAndInitializePoolIfNecessary"; + fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + Self::match_call(call) + } + fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::decode(call) + } + fn encode(&self) -> Vec { + self.encode() + } + } + impl 
substreams_ethereum::rpc::RPCDecodable> + for CreateAndInitializePoolIfNecessary { + fn output(data: &[u8]) -> Result, String> { + Self::output(data) + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct DecreaseLiquidity { + pub token_id: substreams::scalar::BigInt, + pub liquidity: substreams::scalar::BigInt, + pub amount0_min: substreams::scalar::BigInt, + pub amount1_min: substreams::scalar::BigInt, + pub deadline: substreams::scalar::BigInt, + } + impl DecreaseLiquidity { + const METHOD_ID: [u8; 4] = [3u8, 163u8, 242u8, 171u8]; + pub fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + let maybe_data = call.input.get(4..); + if maybe_data.is_none() { + return Err("no data to decode".to_string()); + } + let mut values = ethabi::decode( + &[ + ethabi::ParamType::Uint(256usize), + ethabi::ParamType::Uint(128usize), + ethabi::ParamType::Uint(256usize), + ethabi::ParamType::Uint(256usize), + ethabi::ParamType::Uint(256usize), + ], + maybe_data.unwrap(), + ) + .map_err(|e| format!("unable to decode call.input: {:?}", e))?; + values.reverse(); + Ok(Self { + token_id: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + liquidity: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + amount0_min: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + amount1_min: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + 
deadline: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + }) + } + pub fn encode(&self) -> Vec { + let data = ethabi::encode( + &[ + ethabi::Token::Uint( + ethabi::Uint::from_big_endian( + match self.token_id.clone().to_bytes_be() { + (num_bigint::Sign::Plus, bytes) => bytes, + (num_bigint::Sign::NoSign, bytes) => bytes, + (num_bigint::Sign::Minus, _) => { + panic!("negative numbers are not supported") + } + } + .as_slice(), + ), + ), + ethabi::Token::Uint( + ethabi::Uint::from_big_endian( + match self.liquidity.clone().to_bytes_be() { + (num_bigint::Sign::Plus, bytes) => bytes, + (num_bigint::Sign::NoSign, bytes) => bytes, + (num_bigint::Sign::Minus, _) => { + panic!("negative numbers are not supported") + } + } + .as_slice(), + ), + ), + ethabi::Token::Uint( + ethabi::Uint::from_big_endian( + match self.amount0_min.clone().to_bytes_be() { + (num_bigint::Sign::Plus, bytes) => bytes, + (num_bigint::Sign::NoSign, bytes) => bytes, + (num_bigint::Sign::Minus, _) => { + panic!("negative numbers are not supported") + } + } + .as_slice(), + ), + ), + ethabi::Token::Uint( + ethabi::Uint::from_big_endian( + match self.amount1_min.clone().to_bytes_be() { + (num_bigint::Sign::Plus, bytes) => bytes, + (num_bigint::Sign::NoSign, bytes) => bytes, + (num_bigint::Sign::Minus, _) => { + panic!("negative numbers are not supported") + } + } + .as_slice(), + ), + ), + ethabi::Token::Uint( + ethabi::Uint::from_big_endian( + match self.deadline.clone().to_bytes_be() { + (num_bigint::Sign::Plus, bytes) => bytes, + (num_bigint::Sign::NoSign, bytes) => bytes, + (num_bigint::Sign::Minus, _) => { + panic!("negative numbers are not supported") + } + } + .as_slice(), + ), + ), + ], + ); + let mut encoded = Vec::with_capacity(4 + data.len()); + encoded.extend(Self::METHOD_ID); + encoded.extend(data); + encoded + } + pub fn 
output_call( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result< + (substreams::scalar::BigInt, substreams::scalar::BigInt), + String, + > { + Self::output(call.return_data.as_ref()) + } + pub fn output( + data: &[u8], + ) -> Result< + (substreams::scalar::BigInt, substreams::scalar::BigInt), + String, + > { + let mut values = ethabi::decode( + &[ + ethabi::ParamType::Uint(256usize), + ethabi::ParamType::Uint(256usize), + ], + data.as_ref(), + ) + .map_err(|e| format!("unable to decode output data: {:?}", e))?; + values.reverse(); + Ok(( + { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + )) + } + pub fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + match call.input.get(0..4) { + Some(signature) => Self::METHOD_ID == signature, + None => false, + } + } + pub fn call( + &self, + address: Vec, + ) -> Option<(substreams::scalar::BigInt, substreams::scalar::BigInt)> { + use substreams_ethereum::pb::eth::rpc; + let rpc_calls = rpc::RpcCalls { + calls: vec![ + rpc::RpcCall { to_addr : address, data : self.encode(), } + ], + }; + let responses = substreams_ethereum::rpc::eth_call(&rpc_calls).responses; + let response = responses + .get(0) + .expect("one response should have existed"); + if response.failed { + return None; + } + match Self::output(response.raw.as_ref()) { + Ok(data) => Some(data), + Err(err) => { + use substreams_ethereum::Function; + substreams::log::info!( + "Call output for function `{}` failed to decode with error: {}", + Self::NAME, err + ); + None + } + } + } + } + impl substreams_ethereum::Function for DecreaseLiquidity { + const NAME: &'static 
str = "decreaseLiquidity"; + fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + Self::match_call(call) + } + fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::decode(call) + } + fn encode(&self) -> Vec { + self.encode() + } + } + impl substreams_ethereum::rpc::RPCDecodable< + (substreams::scalar::BigInt, substreams::scalar::BigInt), + > for DecreaseLiquidity { + fn output( + data: &[u8], + ) -> Result< + (substreams::scalar::BigInt, substreams::scalar::BigInt), + String, + > { + Self::output(data) + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct Factory {} + impl Factory { + const METHOD_ID: [u8; 4] = [196u8, 90u8, 1u8, 85u8]; + pub fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Ok(Self {}) + } + pub fn encode(&self) -> Vec { + let data = ethabi::encode(&[]); + let mut encoded = Vec::with_capacity(4 + data.len()); + encoded.extend(Self::METHOD_ID); + encoded.extend(data); + encoded + } + pub fn output_call( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result, String> { + Self::output(call.return_data.as_ref()) + } + pub fn output(data: &[u8]) -> Result, String> { + let mut values = ethabi::decode( + &[ethabi::ParamType::Address], + data.as_ref(), + ) + .map_err(|e| format!("unable to decode output data: {:?}", e))?; + Ok( + values + .pop() + .expect("one output data should have existed") + .into_address() + .expect(INTERNAL_ERR) + .as_bytes() + .to_vec(), + ) + } + pub fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + match call.input.get(0..4) { + Some(signature) => Self::METHOD_ID == signature, + None => false, + } + } + pub fn call(&self, address: Vec) -> Option> { + use substreams_ethereum::pb::eth::rpc; + let rpc_calls = rpc::RpcCalls { + calls: vec![ + rpc::RpcCall { to_addr : address, data : self.encode(), } + ], + }; + let responses = substreams_ethereum::rpc::eth_call(&rpc_calls).responses; + let response = responses + .get(0) + 
.expect("one response should have existed"); + if response.failed { + return None; + } + match Self::output(response.raw.as_ref()) { + Ok(data) => Some(data), + Err(err) => { + use substreams_ethereum::Function; + substreams::log::info!( + "Call output for function `{}` failed to decode with error: {}", + Self::NAME, err + ); + None + } + } + } + } + impl substreams_ethereum::Function for Factory { + const NAME: &'static str = "factory"; + fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + Self::match_call(call) + } + fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::decode(call) + } + fn encode(&self) -> Vec { + self.encode() + } + } + impl substreams_ethereum::rpc::RPCDecodable> for Factory { + fn output(data: &[u8]) -> Result, String> { + Self::output(data) + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct GetApproved { + pub token_id: substreams::scalar::BigInt, + } + impl GetApproved { + const METHOD_ID: [u8; 4] = [8u8, 24u8, 18u8, 252u8]; + pub fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + let maybe_data = call.input.get(4..); + if maybe_data.is_none() { + return Err("no data to decode".to_string()); + } + let mut values = ethabi::decode( + &[ethabi::ParamType::Uint(256usize)], + maybe_data.unwrap(), + ) + .map_err(|e| format!("unable to decode call.input: {:?}", e))?; + values.reverse(); + Ok(Self { + token_id: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + }) + } + pub fn encode(&self) -> Vec { + let data = ethabi::encode( + &[ + ethabi::Token::Uint( + ethabi::Uint::from_big_endian( + match self.token_id.clone().to_bytes_be() { + (num_bigint::Sign::Plus, bytes) => bytes, + (num_bigint::Sign::NoSign, bytes) => bytes, + (num_bigint::Sign::Minus, _) => { + panic!("negative numbers are not supported") + } 
+ } + .as_slice(), + ), + ), + ], + ); + let mut encoded = Vec::with_capacity(4 + data.len()); + encoded.extend(Self::METHOD_ID); + encoded.extend(data); + encoded + } + pub fn output_call( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result, String> { + Self::output(call.return_data.as_ref()) + } + pub fn output(data: &[u8]) -> Result, String> { + let mut values = ethabi::decode( + &[ethabi::ParamType::Address], + data.as_ref(), + ) + .map_err(|e| format!("unable to decode output data: {:?}", e))?; + Ok( + values + .pop() + .expect("one output data should have existed") + .into_address() + .expect(INTERNAL_ERR) + .as_bytes() + .to_vec(), + ) + } + pub fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + match call.input.get(0..4) { + Some(signature) => Self::METHOD_ID == signature, + None => false, + } + } + pub fn call(&self, address: Vec) -> Option> { + use substreams_ethereum::pb::eth::rpc; + let rpc_calls = rpc::RpcCalls { + calls: vec![ + rpc::RpcCall { to_addr : address, data : self.encode(), } + ], + }; + let responses = substreams_ethereum::rpc::eth_call(&rpc_calls).responses; + let response = responses + .get(0) + .expect("one response should have existed"); + if response.failed { + return None; + } + match Self::output(response.raw.as_ref()) { + Ok(data) => Some(data), + Err(err) => { + use substreams_ethereum::Function; + substreams::log::info!( + "Call output for function `{}` failed to decode with error: {}", + Self::NAME, err + ); + None + } + } + } + } + impl substreams_ethereum::Function for GetApproved { + const NAME: &'static str = "getApproved"; + fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + Self::match_call(call) + } + fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::decode(call) + } + fn encode(&self) -> Vec { + self.encode() + } + } + impl substreams_ethereum::rpc::RPCDecodable> for GetApproved { + fn output(data: &[u8]) -> Result, String> { + 
Self::output(data) + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct IncreaseLiquidity { + pub token_id: substreams::scalar::BigInt, + pub amount0_desired: substreams::scalar::BigInt, + pub amount1_desired: substreams::scalar::BigInt, + pub amount0_min: substreams::scalar::BigInt, + pub amount1_min: substreams::scalar::BigInt, + pub deadline: substreams::scalar::BigInt, + } + impl IncreaseLiquidity { + const METHOD_ID: [u8; 4] = [18u8, 215u8, 178u8, 196u8]; + pub fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + let maybe_data = call.input.get(4..); + if maybe_data.is_none() { + return Err("no data to decode".to_string()); + } + let mut values = ethabi::decode( + &[ + ethabi::ParamType::Uint(256usize), + ethabi::ParamType::Uint(256usize), + ethabi::ParamType::Uint(256usize), + ethabi::ParamType::Uint(256usize), + ethabi::ParamType::Uint(256usize), + ethabi::ParamType::Uint(256usize), + ], + maybe_data.unwrap(), + ) + .map_err(|e| format!("unable to decode call.input: {:?}", e))?; + values.reverse(); + Ok(Self { + token_id: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + amount0_desired: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + amount1_desired: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + amount0_min: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + amount1_min: { + let mut v = 
[0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + deadline: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + }) + } + pub fn encode(&self) -> Vec { + let data = ethabi::encode( + &[ + ethabi::Token::Uint( + ethabi::Uint::from_big_endian( + match self.token_id.clone().to_bytes_be() { + (num_bigint::Sign::Plus, bytes) => bytes, + (num_bigint::Sign::NoSign, bytes) => bytes, + (num_bigint::Sign::Minus, _) => { + panic!("negative numbers are not supported") + } + } + .as_slice(), + ), + ), + ethabi::Token::Uint( + ethabi::Uint::from_big_endian( + match self.amount0_desired.clone().to_bytes_be() { + (num_bigint::Sign::Plus, bytes) => bytes, + (num_bigint::Sign::NoSign, bytes) => bytes, + (num_bigint::Sign::Minus, _) => { + panic!("negative numbers are not supported") + } + } + .as_slice(), + ), + ), + ethabi::Token::Uint( + ethabi::Uint::from_big_endian( + match self.amount1_desired.clone().to_bytes_be() { + (num_bigint::Sign::Plus, bytes) => bytes, + (num_bigint::Sign::NoSign, bytes) => bytes, + (num_bigint::Sign::Minus, _) => { + panic!("negative numbers are not supported") + } + } + .as_slice(), + ), + ), + ethabi::Token::Uint( + ethabi::Uint::from_big_endian( + match self.amount0_min.clone().to_bytes_be() { + (num_bigint::Sign::Plus, bytes) => bytes, + (num_bigint::Sign::NoSign, bytes) => bytes, + (num_bigint::Sign::Minus, _) => { + panic!("negative numbers are not supported") + } + } + .as_slice(), + ), + ), + ethabi::Token::Uint( + ethabi::Uint::from_big_endian( + match self.amount1_min.clone().to_bytes_be() { + (num_bigint::Sign::Plus, bytes) => bytes, + (num_bigint::Sign::NoSign, bytes) => bytes, + (num_bigint::Sign::Minus, _) => { + panic!("negative numbers 
are not supported") + } + } + .as_slice(), + ), + ), + ethabi::Token::Uint( + ethabi::Uint::from_big_endian( + match self.deadline.clone().to_bytes_be() { + (num_bigint::Sign::Plus, bytes) => bytes, + (num_bigint::Sign::NoSign, bytes) => bytes, + (num_bigint::Sign::Minus, _) => { + panic!("negative numbers are not supported") + } + } + .as_slice(), + ), + ), + ], + ); + let mut encoded = Vec::with_capacity(4 + data.len()); + encoded.extend(Self::METHOD_ID); + encoded.extend(data); + encoded + } + pub fn output_call( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result< + ( + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + ), + String, + > { + Self::output(call.return_data.as_ref()) + } + pub fn output( + data: &[u8], + ) -> Result< + ( + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + ), + String, + > { + let mut values = ethabi::decode( + &[ + ethabi::ParamType::Uint(128usize), + ethabi::ParamType::Uint(256usize), + ethabi::ParamType::Uint(256usize), + ], + data.as_ref(), + ) + .map_err(|e| format!("unable to decode output data: {:?}", e))?; + values.reverse(); + Ok(( + { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + )) + } + pub fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + match call.input.get(0..4) { + Some(signature) => Self::METHOD_ID == signature, + None => 
false, + } + } + pub fn call( + &self, + address: Vec, + ) -> Option< + ( + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + ), + > { + use substreams_ethereum::pb::eth::rpc; + let rpc_calls = rpc::RpcCalls { + calls: vec![ + rpc::RpcCall { to_addr : address, data : self.encode(), } + ], + }; + let responses = substreams_ethereum::rpc::eth_call(&rpc_calls).responses; + let response = responses + .get(0) + .expect("one response should have existed"); + if response.failed { + return None; + } + match Self::output(response.raw.as_ref()) { + Ok(data) => Some(data), + Err(err) => { + use substreams_ethereum::Function; + substreams::log::info!( + "Call output for function `{}` failed to decode with error: {}", + Self::NAME, err + ); + None + } + } + } + } + impl substreams_ethereum::Function for IncreaseLiquidity { + const NAME: &'static str = "increaseLiquidity"; + fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + Self::match_call(call) + } + fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::decode(call) + } + fn encode(&self) -> Vec { + self.encode() + } + } + impl substreams_ethereum::rpc::RPCDecodable< + ( + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + ), + > for IncreaseLiquidity { + fn output( + data: &[u8], + ) -> Result< + ( + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + ), + String, + > { + Self::output(data) + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct IsApprovedForAll { + pub owner: Vec, + pub operator: Vec, + } + impl IsApprovedForAll { + const METHOD_ID: [u8; 4] = [233u8, 133u8, 233u8, 197u8]; + pub fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + let maybe_data = call.input.get(4..); + if maybe_data.is_none() { + return Err("no data to decode".to_string()); + } + let mut values = ethabi::decode( + &[ethabi::ParamType::Address, 
ethabi::ParamType::Address], + maybe_data.unwrap(), + ) + .map_err(|e| format!("unable to decode call.input: {:?}", e))?; + values.reverse(); + Ok(Self { + owner: values + .pop() + .expect(INTERNAL_ERR) + .into_address() + .expect(INTERNAL_ERR) + .as_bytes() + .to_vec(), + operator: values + .pop() + .expect(INTERNAL_ERR) + .into_address() + .expect(INTERNAL_ERR) + .as_bytes() + .to_vec(), + }) + } + pub fn encode(&self) -> Vec { + let data = ethabi::encode( + &[ + ethabi::Token::Address(ethabi::Address::from_slice(&self.owner)), + ethabi::Token::Address( + ethabi::Address::from_slice(&self.operator), + ), + ], + ); + let mut encoded = Vec::with_capacity(4 + data.len()); + encoded.extend(Self::METHOD_ID); + encoded.extend(data); + encoded + } + pub fn output_call( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::output(call.return_data.as_ref()) + } + pub fn output(data: &[u8]) -> Result { + let mut values = ethabi::decode( + &[ethabi::ParamType::Bool], + data.as_ref(), + ) + .map_err(|e| format!("unable to decode output data: {:?}", e))?; + Ok( + values + .pop() + .expect("one output data should have existed") + .into_bool() + .expect(INTERNAL_ERR), + ) + } + pub fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + match call.input.get(0..4) { + Some(signature) => Self::METHOD_ID == signature, + None => false, + } + } + pub fn call(&self, address: Vec) -> Option { + use substreams_ethereum::pb::eth::rpc; + let rpc_calls = rpc::RpcCalls { + calls: vec![ + rpc::RpcCall { to_addr : address, data : self.encode(), } + ], + }; + let responses = substreams_ethereum::rpc::eth_call(&rpc_calls).responses; + let response = responses + .get(0) + .expect("one response should have existed"); + if response.failed { + return None; + } + match Self::output(response.raw.as_ref()) { + Ok(data) => Some(data), + Err(err) => { + use substreams_ethereum::Function; + substreams::log::info!( + "Call output for function `{}` failed to decode with 
error: {}", + Self::NAME, err + ); + None + } + } + } + } + impl substreams_ethereum::Function for IsApprovedForAll { + const NAME: &'static str = "isApprovedForAll"; + fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + Self::match_call(call) + } + fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::decode(call) + } + fn encode(&self) -> Vec { + self.encode() + } + } + impl substreams_ethereum::rpc::RPCDecodable for IsApprovedForAll { + fn output(data: &[u8]) -> Result { + Self::output(data) + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct Mint { + pub params: ( + Vec, + Vec, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + Vec, + substreams::scalar::BigInt, + ), + } + impl Mint { + const METHOD_ID: [u8; 4] = [136u8, 49u8, 100u8, 86u8]; + pub fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + let maybe_data = call.input.get(4..); + if maybe_data.is_none() { + return Err("no data to decode".to_string()); + } + let mut values = ethabi::decode( + &[ + ethabi::ParamType::Tuple( + vec![ + ethabi::ParamType::Address, ethabi::ParamType::Address, + ethabi::ParamType::Uint(24usize), + ethabi::ParamType::Int(24usize), + ethabi::ParamType::Int(24usize), + ethabi::ParamType::Uint(256usize), + ethabi::ParamType::Uint(256usize), + ethabi::ParamType::Uint(256usize), + ethabi::ParamType::Uint(256usize), + ethabi::ParamType::Address, + ethabi::ParamType::Uint(256usize) + ], + ), + ], + maybe_data.unwrap(), + ) + .map_err(|e| format!("unable to decode call.input: {:?}", e))?; + values.reverse(); + Ok(Self { + params: { + let tuple_elements = values + .pop() + .expect(INTERNAL_ERR) + .into_tuple() + .expect(INTERNAL_ERR); + ( + tuple_elements[0usize] + .clone() + .into_address() + .expect(INTERNAL_ERR) + .as_bytes() + .to_vec(), + 
tuple_elements[1usize] + .clone() + .into_address() + .expect(INTERNAL_ERR) + .as_bytes() + .to_vec(), + { + let mut v = [0 as u8; 32]; + tuple_elements[2usize] + .clone() + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + { + let mut v = [0 as u8; 32]; + tuple_elements[3usize] + .clone() + .into_int() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_signed_bytes_be(&v) + }, + { + let mut v = [0 as u8; 32]; + tuple_elements[4usize] + .clone() + .into_int() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_signed_bytes_be(&v) + }, + { + let mut v = [0 as u8; 32]; + tuple_elements[5usize] + .clone() + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + { + let mut v = [0 as u8; 32]; + tuple_elements[6usize] + .clone() + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + { + let mut v = [0 as u8; 32]; + tuple_elements[7usize] + .clone() + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + { + let mut v = [0 as u8; 32]; + tuple_elements[8usize] + .clone() + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + tuple_elements[9usize] + .clone() + .into_address() + .expect(INTERNAL_ERR) + .as_bytes() + .to_vec(), + { + let mut v = [0 as u8; 32]; + tuple_elements[10usize] + .clone() + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + ) + }, + }) + } + pub fn encode(&self) -> Vec { + let data = ethabi::encode( + &[ + ethabi::Token::Tuple( + vec![ + 
ethabi::Token::Address(ethabi::Address::from_slice(& self + .params.0)), + ethabi::Token::Address(ethabi::Address::from_slice(& self + .params.1)), + ethabi::Token::Uint(ethabi::Uint::from_big_endian(match self + .params.2.clone().to_bytes_be() { (num_bigint::Sign::Plus, + bytes) => bytes, (num_bigint::Sign::NoSign, bytes) => bytes, + (num_bigint::Sign::Minus, _) => { + panic!("negative numbers are not supported") }, } + .as_slice(),),), { let non_full_signed_bytes = self.params.3 + .to_signed_bytes_be(); let full_signed_bytes_init = if + non_full_signed_bytes[0] & 0x80 == 0x80 { 0xff } else { 0x00 + }; let mut full_signed_bytes = [full_signed_bytes_init as + u8; 32]; non_full_signed_bytes.into_iter().rev().enumerate() + .for_each(| (i, byte) | full_signed_bytes[31 - i] = byte); + ethabi::Token::Int(ethabi::Int::from_big_endian(full_signed_bytes + .as_ref())) }, { let non_full_signed_bytes = self.params.4 + .to_signed_bytes_be(); let full_signed_bytes_init = if + non_full_signed_bytes[0] & 0x80 == 0x80 { 0xff } else { 0x00 + }; let mut full_signed_bytes = [full_signed_bytes_init as + u8; 32]; non_full_signed_bytes.into_iter().rev().enumerate() + .for_each(| (i, byte) | full_signed_bytes[31 - i] = byte); + ethabi::Token::Int(ethabi::Int::from_big_endian(full_signed_bytes + .as_ref())) }, + ethabi::Token::Uint(ethabi::Uint::from_big_endian(match self + .params.5.clone().to_bytes_be() { (num_bigint::Sign::Plus, + bytes) => bytes, (num_bigint::Sign::NoSign, bytes) => bytes, + (num_bigint::Sign::Minus, _) => { + panic!("negative numbers are not supported") }, } + .as_slice(),),), + ethabi::Token::Uint(ethabi::Uint::from_big_endian(match self + .params.6.clone().to_bytes_be() { (num_bigint::Sign::Plus, + bytes) => bytes, (num_bigint::Sign::NoSign, bytes) => bytes, + (num_bigint::Sign::Minus, _) => { + panic!("negative numbers are not supported") }, } + .as_slice(),),), + ethabi::Token::Uint(ethabi::Uint::from_big_endian(match self + .params.7.clone().to_bytes_be() { 
(num_bigint::Sign::Plus, + bytes) => bytes, (num_bigint::Sign::NoSign, bytes) => bytes, + (num_bigint::Sign::Minus, _) => { + panic!("negative numbers are not supported") }, } + .as_slice(),),), + ethabi::Token::Uint(ethabi::Uint::from_big_endian(match self + .params.8.clone().to_bytes_be() { (num_bigint::Sign::Plus, + bytes) => bytes, (num_bigint::Sign::NoSign, bytes) => bytes, + (num_bigint::Sign::Minus, _) => { + panic!("negative numbers are not supported") }, } + .as_slice(),),), + ethabi::Token::Address(ethabi::Address::from_slice(& self + .params.9)), + ethabi::Token::Uint(ethabi::Uint::from_big_endian(match self + .params.10.clone().to_bytes_be() { (num_bigint::Sign::Plus, + bytes) => bytes, (num_bigint::Sign::NoSign, bytes) => bytes, + (num_bigint::Sign::Minus, _) => { + panic!("negative numbers are not supported") }, } + .as_slice(),),) + ], + ), + ], + ); + let mut encoded = Vec::with_capacity(4 + data.len()); + encoded.extend(Self::METHOD_ID); + encoded.extend(data); + encoded + } + pub fn output_call( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result< + ( + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + ), + String, + > { + Self::output(call.return_data.as_ref()) + } + pub fn output( + data: &[u8], + ) -> Result< + ( + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + ), + String, + > { + let mut values = ethabi::decode( + &[ + ethabi::ParamType::Uint(256usize), + ethabi::ParamType::Uint(128usize), + ethabi::ParamType::Uint(256usize), + ethabi::ParamType::Uint(256usize), + ], + data.as_ref(), + ) + .map_err(|e| format!("unable to decode output data: {:?}", e))?; + values.reverse(); + Ok(( + { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + { 
+ let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + )) + } + pub fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + match call.input.get(0..4) { + Some(signature) => Self::METHOD_ID == signature, + None => false, + } + } + pub fn call( + &self, + address: Vec, + ) -> Option< + ( + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + ), + > { + use substreams_ethereum::pb::eth::rpc; + let rpc_calls = rpc::RpcCalls { + calls: vec![ + rpc::RpcCall { to_addr : address, data : self.encode(), } + ], + }; + let responses = substreams_ethereum::rpc::eth_call(&rpc_calls).responses; + let response = responses + .get(0) + .expect("one response should have existed"); + if response.failed { + return None; + } + match Self::output(response.raw.as_ref()) { + Ok(data) => Some(data), + Err(err) => { + use substreams_ethereum::Function; + substreams::log::info!( + "Call output for function `{}` failed to decode with error: {}", + Self::NAME, err + ); + None + } + } + } + } + impl substreams_ethereum::Function for Mint { + const NAME: &'static str = "mint"; + fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + Self::match_call(call) + } + fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::decode(call) + } + fn encode(&self) -> Vec { + self.encode() + } + } + impl 
substreams_ethereum::rpc::RPCDecodable< + ( + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + ), + > for Mint { + fn output( + data: &[u8], + ) -> Result< + ( + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + ), + String, + > { + Self::output(data) + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct Multicall { + pub data: Vec>, + } + impl Multicall { + const METHOD_ID: [u8; 4] = [172u8, 150u8, 80u8, 216u8]; + pub fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + let maybe_data = call.input.get(4..); + if maybe_data.is_none() { + return Err("no data to decode".to_string()); + } + let mut values = ethabi::decode( + &[ethabi::ParamType::Array(Box::new(ethabi::ParamType::Bytes))], + maybe_data.unwrap(), + ) + .map_err(|e| format!("unable to decode call.input: {:?}", e))?; + values.reverse(); + Ok(Self { + data: values + .pop() + .expect(INTERNAL_ERR) + .into_array() + .expect(INTERNAL_ERR) + .into_iter() + .map(|inner| inner.into_bytes().expect(INTERNAL_ERR)) + .collect(), + }) + } + pub fn encode(&self) -> Vec { + let data = ethabi::encode( + &[ + { + let v = self + .data + .iter() + .map(|inner| ethabi::Token::Bytes(inner.clone())) + .collect(); + ethabi::Token::Array(v) + }, + ], + ); + let mut encoded = Vec::with_capacity(4 + data.len()); + encoded.extend(Self::METHOD_ID); + encoded.extend(data); + encoded + } + pub fn output_call( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result>, String> { + Self::output(call.return_data.as_ref()) + } + pub fn output(data: &[u8]) -> Result>, String> { + let mut values = ethabi::decode( + &[ethabi::ParamType::Array(Box::new(ethabi::ParamType::Bytes))], + data.as_ref(), + ) + .map_err(|e| format!("unable to decode output data: {:?}", e))?; + Ok( + values + .pop() + .expect("one output data should have existed") + .into_array() + 
.expect(INTERNAL_ERR) + .into_iter() + .map(|inner| inner.into_bytes().expect(INTERNAL_ERR)) + .collect(), + ) + } + pub fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + match call.input.get(0..4) { + Some(signature) => Self::METHOD_ID == signature, + None => false, + } + } + pub fn call(&self, address: Vec) -> Option>> { + use substreams_ethereum::pb::eth::rpc; + let rpc_calls = rpc::RpcCalls { + calls: vec![ + rpc::RpcCall { to_addr : address, data : self.encode(), } + ], + }; + let responses = substreams_ethereum::rpc::eth_call(&rpc_calls).responses; + let response = responses + .get(0) + .expect("one response should have existed"); + if response.failed { + return None; + } + match Self::output(response.raw.as_ref()) { + Ok(data) => Some(data), + Err(err) => { + use substreams_ethereum::Function; + substreams::log::info!( + "Call output for function `{}` failed to decode with error: {}", + Self::NAME, err + ); + None + } + } + } + } + impl substreams_ethereum::Function for Multicall { + const NAME: &'static str = "multicall"; + fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + Self::match_call(call) + } + fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::decode(call) + } + fn encode(&self) -> Vec { + self.encode() + } + } + impl substreams_ethereum::rpc::RPCDecodable>> for Multicall { + fn output(data: &[u8]) -> Result>, String> { + Self::output(data) + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct Name {} + impl Name { + const METHOD_ID: [u8; 4] = [6u8, 253u8, 222u8, 3u8]; + pub fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Ok(Self {}) + } + pub fn encode(&self) -> Vec { + let data = ethabi::encode(&[]); + let mut encoded = Vec::with_capacity(4 + data.len()); + encoded.extend(Self::METHOD_ID); + encoded.extend(data); + encoded + } + pub fn output_call( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + 
Self::output(call.return_data.as_ref()) + } + pub fn output(data: &[u8]) -> Result { + let mut values = ethabi::decode( + &[ethabi::ParamType::String], + data.as_ref(), + ) + .map_err(|e| format!("unable to decode output data: {:?}", e))?; + Ok( + values + .pop() + .expect("one output data should have existed") + .into_string() + .expect(INTERNAL_ERR), + ) + } + pub fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + match call.input.get(0..4) { + Some(signature) => Self::METHOD_ID == signature, + None => false, + } + } + pub fn call(&self, address: Vec) -> Option { + use substreams_ethereum::pb::eth::rpc; + let rpc_calls = rpc::RpcCalls { + calls: vec![ + rpc::RpcCall { to_addr : address, data : self.encode(), } + ], + }; + let responses = substreams_ethereum::rpc::eth_call(&rpc_calls).responses; + let response = responses + .get(0) + .expect("one response should have existed"); + if response.failed { + return None; + } + match Self::output(response.raw.as_ref()) { + Ok(data) => Some(data), + Err(err) => { + use substreams_ethereum::Function; + substreams::log::info!( + "Call output for function `{}` failed to decode with error: {}", + Self::NAME, err + ); + None + } + } + } + } + impl substreams_ethereum::Function for Name { + const NAME: &'static str = "name"; + fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + Self::match_call(call) + } + fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::decode(call) + } + fn encode(&self) -> Vec { + self.encode() + } + } + impl substreams_ethereum::rpc::RPCDecodable for Name { + fn output(data: &[u8]) -> Result { + Self::output(data) + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct OwnerOf { + pub token_id: substreams::scalar::BigInt, + } + impl OwnerOf { + const METHOD_ID: [u8; 4] = [99u8, 82u8, 33u8, 30u8]; + pub fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + let maybe_data = call.input.get(4..); + if 
maybe_data.is_none() { + return Err("no data to decode".to_string()); + } + let mut values = ethabi::decode( + &[ethabi::ParamType::Uint(256usize)], + maybe_data.unwrap(), + ) + .map_err(|e| format!("unable to decode call.input: {:?}", e))?; + values.reverse(); + Ok(Self { + token_id: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + }) + } + pub fn encode(&self) -> Vec { + let data = ethabi::encode( + &[ + ethabi::Token::Uint( + ethabi::Uint::from_big_endian( + match self.token_id.clone().to_bytes_be() { + (num_bigint::Sign::Plus, bytes) => bytes, + (num_bigint::Sign::NoSign, bytes) => bytes, + (num_bigint::Sign::Minus, _) => { + panic!("negative numbers are not supported") + } + } + .as_slice(), + ), + ), + ], + ); + let mut encoded = Vec::with_capacity(4 + data.len()); + encoded.extend(Self::METHOD_ID); + encoded.extend(data); + encoded + } + pub fn output_call( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result, String> { + Self::output(call.return_data.as_ref()) + } + pub fn output(data: &[u8]) -> Result, String> { + let mut values = ethabi::decode( + &[ethabi::ParamType::Address], + data.as_ref(), + ) + .map_err(|e| format!("unable to decode output data: {:?}", e))?; + Ok( + values + .pop() + .expect("one output data should have existed") + .into_address() + .expect(INTERNAL_ERR) + .as_bytes() + .to_vec(), + ) + } + pub fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + match call.input.get(0..4) { + Some(signature) => Self::METHOD_ID == signature, + None => false, + } + } + pub fn call(&self, address: Vec) -> Option> { + use substreams_ethereum::pb::eth::rpc; + let rpc_calls = rpc::RpcCalls { + calls: vec![ + rpc::RpcCall { to_addr : address, data : self.encode(), } + ], + }; + let responses = substreams_ethereum::rpc::eth_call(&rpc_calls).responses; + let response 
= responses + .get(0) + .expect("one response should have existed"); + if response.failed { + return None; + } + match Self::output(response.raw.as_ref()) { + Ok(data) => Some(data), + Err(err) => { + use substreams_ethereum::Function; + substreams::log::info!( + "Call output for function `{}` failed to decode with error: {}", + Self::NAME, err + ); + None + } + } + } + } + impl substreams_ethereum::Function for OwnerOf { + const NAME: &'static str = "ownerOf"; + fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + Self::match_call(call) + } + fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::decode(call) + } + fn encode(&self) -> Vec { + self.encode() + } + } + impl substreams_ethereum::rpc::RPCDecodable> for OwnerOf { + fn output(data: &[u8]) -> Result, String> { + Self::output(data) + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct Permit { + pub spender: Vec, + pub token_id: substreams::scalar::BigInt, + pub deadline: substreams::scalar::BigInt, + pub v: substreams::scalar::BigInt, + pub r: [u8; 32usize], + pub s: [u8; 32usize], + } + impl Permit { + const METHOD_ID: [u8; 4] = [122u8, 194u8, 255u8, 123u8]; + pub fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + let maybe_data = call.input.get(4..); + if maybe_data.is_none() { + return Err("no data to decode".to_string()); + } + let mut values = ethabi::decode( + &[ + ethabi::ParamType::Address, + ethabi::ParamType::Uint(256usize), + ethabi::ParamType::Uint(256usize), + ethabi::ParamType::Uint(8usize), + ethabi::ParamType::FixedBytes(32usize), + ethabi::ParamType::FixedBytes(32usize), + ], + maybe_data.unwrap(), + ) + .map_err(|e| format!("unable to decode call.input: {:?}", e))?; + values.reverse(); + Ok(Self { + spender: values + .pop() + .expect(INTERNAL_ERR) + .into_address() + .expect(INTERNAL_ERR) + .as_bytes() + .to_vec(), + token_id: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + 
.into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + deadline: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + v: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + r: { + let mut result = [0u8; 32]; + let v = values + .pop() + .expect(INTERNAL_ERR) + .into_fixed_bytes() + .expect(INTERNAL_ERR); + result.copy_from_slice(&v); + result + }, + s: { + let mut result = [0u8; 32]; + let v = values + .pop() + .expect(INTERNAL_ERR) + .into_fixed_bytes() + .expect(INTERNAL_ERR); + result.copy_from_slice(&v); + result + }, + }) + } + pub fn encode(&self) -> Vec { + let data = ethabi::encode( + &[ + ethabi::Token::Address( + ethabi::Address::from_slice(&self.spender), + ), + ethabi::Token::Uint( + ethabi::Uint::from_big_endian( + match self.token_id.clone().to_bytes_be() { + (num_bigint::Sign::Plus, bytes) => bytes, + (num_bigint::Sign::NoSign, bytes) => bytes, + (num_bigint::Sign::Minus, _) => { + panic!("negative numbers are not supported") + } + } + .as_slice(), + ), + ), + ethabi::Token::Uint( + ethabi::Uint::from_big_endian( + match self.deadline.clone().to_bytes_be() { + (num_bigint::Sign::Plus, bytes) => bytes, + (num_bigint::Sign::NoSign, bytes) => bytes, + (num_bigint::Sign::Minus, _) => { + panic!("negative numbers are not supported") + } + } + .as_slice(), + ), + ), + ethabi::Token::Uint( + ethabi::Uint::from_big_endian( + match self.v.clone().to_bytes_be() { + (num_bigint::Sign::Plus, bytes) => bytes, + (num_bigint::Sign::NoSign, bytes) => bytes, + (num_bigint::Sign::Minus, _) => { + panic!("negative numbers are not supported") + } + } + .as_slice(), + ), + ), + 
ethabi::Token::FixedBytes(self.r.as_ref().to_vec()), + ethabi::Token::FixedBytes(self.s.as_ref().to_vec()), + ], + ); + let mut encoded = Vec::with_capacity(4 + data.len()); + encoded.extend(Self::METHOD_ID); + encoded.extend(data); + encoded + } + pub fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + match call.input.get(0..4) { + Some(signature) => Self::METHOD_ID == signature, + None => false, + } + } + } + impl substreams_ethereum::Function for Permit { + const NAME: &'static str = "permit"; + fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + Self::match_call(call) + } + fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::decode(call) + } + fn encode(&self) -> Vec { + self.encode() + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct Positions { + pub token_id: substreams::scalar::BigInt, + } + impl Positions { + const METHOD_ID: [u8; 4] = [153u8, 251u8, 171u8, 136u8]; + pub fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + let maybe_data = call.input.get(4..); + if maybe_data.is_none() { + return Err("no data to decode".to_string()); + } + let mut values = ethabi::decode( + &[ethabi::ParamType::Uint(256usize)], + maybe_data.unwrap(), + ) + .map_err(|e| format!("unable to decode call.input: {:?}", e))?; + values.reverse(); + Ok(Self { + token_id: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + }) + } + pub fn encode(&self) -> Vec { + let data = ethabi::encode( + &[ + ethabi::Token::Uint( + ethabi::Uint::from_big_endian( + match self.token_id.clone().to_bytes_be() { + (num_bigint::Sign::Plus, bytes) => bytes, + (num_bigint::Sign::NoSign, bytes) => bytes, + (num_bigint::Sign::Minus, _) => { + panic!("negative numbers are not supported") + } + } + .as_slice(), + ), + ), + ], + ); + let mut 
encoded = Vec::with_capacity(4 + data.len()); + encoded.extend(Self::METHOD_ID); + encoded.extend(data); + encoded + } + pub fn output_call( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result< + ( + substreams::scalar::BigInt, + Vec, + Vec, + Vec, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + ), + String, + > { + Self::output(call.return_data.as_ref()) + } + pub fn output( + data: &[u8], + ) -> Result< + ( + substreams::scalar::BigInt, + Vec, + Vec, + Vec, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + ), + String, + > { + let mut values = ethabi::decode( + &[ + ethabi::ParamType::Uint(96usize), + ethabi::ParamType::Address, + ethabi::ParamType::Address, + ethabi::ParamType::Address, + ethabi::ParamType::Uint(24usize), + ethabi::ParamType::Int(24usize), + ethabi::ParamType::Int(24usize), + ethabi::ParamType::Uint(128usize), + ethabi::ParamType::Uint(256usize), + ethabi::ParamType::Uint(256usize), + ethabi::ParamType::Uint(128usize), + ethabi::ParamType::Uint(128usize), + ], + data.as_ref(), + ) + .map_err(|e| format!("unable to decode output data: {:?}", e))?; + values.reverse(); + Ok(( + { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + values + .pop() + .expect(INTERNAL_ERR) + .into_address() + .expect(INTERNAL_ERR) + .as_bytes() + .to_vec(), + values + .pop() + .expect(INTERNAL_ERR) + .into_address() + .expect(INTERNAL_ERR) + .as_bytes() + .to_vec(), + values + .pop() + .expect(INTERNAL_ERR) + 
.into_address() + .expect(INTERNAL_ERR) + .as_bytes() + .to_vec(), + { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_int() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_signed_bytes_be(&v) + }, + { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_int() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_signed_bytes_be(&v) + }, + { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + )) + } + pub fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + match call.input.get(0..4) { + Some(signature) => Self::METHOD_ID == signature, + None => false, + } + } + pub fn call( + &self, + address: Vec, + ) -> 
Option< + ( + substreams::scalar::BigInt, + Vec, + Vec, + Vec, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + ), + > { + use substreams_ethereum::pb::eth::rpc; + let rpc_calls = rpc::RpcCalls { + calls: vec![ + rpc::RpcCall { to_addr : address, data : self.encode(), } + ], + }; + let responses = substreams_ethereum::rpc::eth_call(&rpc_calls).responses; + let response = responses + .get(0) + .expect("one response should have existed"); + if response.failed { + return None; + } + match Self::output(response.raw.as_ref()) { + Ok(data) => Some(data), + Err(err) => { + use substreams_ethereum::Function; + substreams::log::info!( + "Call output for function `{}` failed to decode with error: {}", + Self::NAME, err + ); + None + } + } + } + } + impl substreams_ethereum::Function for Positions { + const NAME: &'static str = "positions"; + fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + Self::match_call(call) + } + fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::decode(call) + } + fn encode(&self) -> Vec { + self.encode() + } + } + impl substreams_ethereum::rpc::RPCDecodable< + ( + substreams::scalar::BigInt, + Vec, + Vec, + Vec, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + ), + > for Positions { + fn output( + data: &[u8], + ) -> Result< + ( + substreams::scalar::BigInt, + Vec, + Vec, + Vec, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + substreams::scalar::BigInt, + 
), + String, + > { + Self::output(data) + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct SafeTransferFrom1 { + pub from: Vec, + pub to: Vec, + pub token_id: substreams::scalar::BigInt, + } + impl SafeTransferFrom1 { + const METHOD_ID: [u8; 4] = [66u8, 132u8, 46u8, 14u8]; + pub fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + let maybe_data = call.input.get(4..); + if maybe_data.is_none() { + return Err("no data to decode".to_string()); + } + let mut values = ethabi::decode( + &[ + ethabi::ParamType::Address, + ethabi::ParamType::Address, + ethabi::ParamType::Uint(256usize), + ], + maybe_data.unwrap(), + ) + .map_err(|e| format!("unable to decode call.input: {:?}", e))?; + values.reverse(); + Ok(Self { + from: values + .pop() + .expect(INTERNAL_ERR) + .into_address() + .expect(INTERNAL_ERR) + .as_bytes() + .to_vec(), + to: values + .pop() + .expect(INTERNAL_ERR) + .into_address() + .expect(INTERNAL_ERR) + .as_bytes() + .to_vec(), + token_id: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + }) + } + pub fn encode(&self) -> Vec { + let data = ethabi::encode( + &[ + ethabi::Token::Address(ethabi::Address::from_slice(&self.from)), + ethabi::Token::Address(ethabi::Address::from_slice(&self.to)), + ethabi::Token::Uint( + ethabi::Uint::from_big_endian( + match self.token_id.clone().to_bytes_be() { + (num_bigint::Sign::Plus, bytes) => bytes, + (num_bigint::Sign::NoSign, bytes) => bytes, + (num_bigint::Sign::Minus, _) => { + panic!("negative numbers are not supported") + } + } + .as_slice(), + ), + ), + ], + ); + let mut encoded = Vec::with_capacity(4 + data.len()); + encoded.extend(Self::METHOD_ID); + encoded.extend(data); + encoded + } + pub fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + match call.input.get(0..4) { + Some(signature) => 
Self::METHOD_ID == signature, + None => false, + } + } + } + impl substreams_ethereum::Function for SafeTransferFrom1 { + const NAME: &'static str = "safeTransferFrom1"; + fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + Self::match_call(call) + } + fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::decode(call) + } + fn encode(&self) -> Vec { + self.encode() + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct SafeTransferFrom2 { + pub from: Vec, + pub to: Vec, + pub token_id: substreams::scalar::BigInt, + pub data: Vec, + } + impl SafeTransferFrom2 { + const METHOD_ID: [u8; 4] = [184u8, 141u8, 79u8, 222u8]; + pub fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + let maybe_data = call.input.get(4..); + if maybe_data.is_none() { + return Err("no data to decode".to_string()); + } + let mut values = ethabi::decode( + &[ + ethabi::ParamType::Address, + ethabi::ParamType::Address, + ethabi::ParamType::Uint(256usize), + ethabi::ParamType::Bytes, + ], + maybe_data.unwrap(), + ) + .map_err(|e| format!("unable to decode call.input: {:?}", e))?; + values.reverse(); + Ok(Self { + from: values + .pop() + .expect(INTERNAL_ERR) + .into_address() + .expect(INTERNAL_ERR) + .as_bytes() + .to_vec(), + to: values + .pop() + .expect(INTERNAL_ERR) + .into_address() + .expect(INTERNAL_ERR) + .as_bytes() + .to_vec(), + token_id: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + data: values + .pop() + .expect(INTERNAL_ERR) + .into_bytes() + .expect(INTERNAL_ERR), + }) + } + pub fn encode(&self) -> Vec { + let data = ethabi::encode( + &[ + ethabi::Token::Address(ethabi::Address::from_slice(&self.from)), + ethabi::Token::Address(ethabi::Address::from_slice(&self.to)), + ethabi::Token::Uint( + ethabi::Uint::from_big_endian( + match 
self.token_id.clone().to_bytes_be() { + (num_bigint::Sign::Plus, bytes) => bytes, + (num_bigint::Sign::NoSign, bytes) => bytes, + (num_bigint::Sign::Minus, _) => { + panic!("negative numbers are not supported") + } + } + .as_slice(), + ), + ), + ethabi::Token::Bytes(self.data.clone()), + ], + ); + let mut encoded = Vec::with_capacity(4 + data.len()); + encoded.extend(Self::METHOD_ID); + encoded.extend(data); + encoded + } + pub fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + match call.input.get(0..4) { + Some(signature) => Self::METHOD_ID == signature, + None => false, + } + } + } + impl substreams_ethereum::Function for SafeTransferFrom2 { + const NAME: &'static str = "safeTransferFrom2"; + fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + Self::match_call(call) + } + fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::decode(call) + } + fn encode(&self) -> Vec { + self.encode() + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct SelfPermit { + pub token: Vec, + pub value: substreams::scalar::BigInt, + pub deadline: substreams::scalar::BigInt, + pub v: substreams::scalar::BigInt, + pub r: [u8; 32usize], + pub s: [u8; 32usize], + } + impl SelfPermit { + const METHOD_ID: [u8; 4] = [243u8, 153u8, 92u8, 103u8]; + pub fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + let maybe_data = call.input.get(4..); + if maybe_data.is_none() { + return Err("no data to decode".to_string()); + } + let mut values = ethabi::decode( + &[ + ethabi::ParamType::Address, + ethabi::ParamType::Uint(256usize), + ethabi::ParamType::Uint(256usize), + ethabi::ParamType::Uint(8usize), + ethabi::ParamType::FixedBytes(32usize), + ethabi::ParamType::FixedBytes(32usize), + ], + maybe_data.unwrap(), + ) + .map_err(|e| format!("unable to decode call.input: {:?}", e))?; + values.reverse(); + Ok(Self { + token: values + .pop() + .expect(INTERNAL_ERR) + .into_address() + .expect(INTERNAL_ERR) 
+ .as_bytes() + .to_vec(), + value: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + deadline: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + v: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + r: { + let mut result = [0u8; 32]; + let v = values + .pop() + .expect(INTERNAL_ERR) + .into_fixed_bytes() + .expect(INTERNAL_ERR); + result.copy_from_slice(&v); + result + }, + s: { + let mut result = [0u8; 32]; + let v = values + .pop() + .expect(INTERNAL_ERR) + .into_fixed_bytes() + .expect(INTERNAL_ERR); + result.copy_from_slice(&v); + result + }, + }) + } + pub fn encode(&self) -> Vec { + let data = ethabi::encode( + &[ + ethabi::Token::Address(ethabi::Address::from_slice(&self.token)), + ethabi::Token::Uint( + ethabi::Uint::from_big_endian( + match self.value.clone().to_bytes_be() { + (num_bigint::Sign::Plus, bytes) => bytes, + (num_bigint::Sign::NoSign, bytes) => bytes, + (num_bigint::Sign::Minus, _) => { + panic!("negative numbers are not supported") + } + } + .as_slice(), + ), + ), + ethabi::Token::Uint( + ethabi::Uint::from_big_endian( + match self.deadline.clone().to_bytes_be() { + (num_bigint::Sign::Plus, bytes) => bytes, + (num_bigint::Sign::NoSign, bytes) => bytes, + (num_bigint::Sign::Minus, _) => { + panic!("negative numbers are not supported") + } + } + .as_slice(), + ), + ), + ethabi::Token::Uint( + ethabi::Uint::from_big_endian( + match self.v.clone().to_bytes_be() { + (num_bigint::Sign::Plus, bytes) => bytes, + (num_bigint::Sign::NoSign, bytes) => bytes, + 
(num_bigint::Sign::Minus, _) => { + panic!("negative numbers are not supported") + } + } + .as_slice(), + ), + ), + ethabi::Token::FixedBytes(self.r.as_ref().to_vec()), + ethabi::Token::FixedBytes(self.s.as_ref().to_vec()), + ], + ); + let mut encoded = Vec::with_capacity(4 + data.len()); + encoded.extend(Self::METHOD_ID); + encoded.extend(data); + encoded + } + pub fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + match call.input.get(0..4) { + Some(signature) => Self::METHOD_ID == signature, + None => false, + } + } + } + impl substreams_ethereum::Function for SelfPermit { + const NAME: &'static str = "selfPermit"; + fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + Self::match_call(call) + } + fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::decode(call) + } + fn encode(&self) -> Vec { + self.encode() + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct SelfPermitAllowed { + pub token: Vec, + pub nonce: substreams::scalar::BigInt, + pub expiry: substreams::scalar::BigInt, + pub v: substreams::scalar::BigInt, + pub r: [u8; 32usize], + pub s: [u8; 32usize], + } + impl SelfPermitAllowed { + const METHOD_ID: [u8; 4] = [70u8, 89u8, 164u8, 148u8]; + pub fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + let maybe_data = call.input.get(4..); + if maybe_data.is_none() { + return Err("no data to decode".to_string()); + } + let mut values = ethabi::decode( + &[ + ethabi::ParamType::Address, + ethabi::ParamType::Uint(256usize), + ethabi::ParamType::Uint(256usize), + ethabi::ParamType::Uint(8usize), + ethabi::ParamType::FixedBytes(32usize), + ethabi::ParamType::FixedBytes(32usize), + ], + maybe_data.unwrap(), + ) + .map_err(|e| format!("unable to decode call.input: {:?}", e))?; + values.reverse(); + Ok(Self { + token: values + .pop() + .expect(INTERNAL_ERR) + .into_address() + .expect(INTERNAL_ERR) + .as_bytes() + .to_vec(), + nonce: { + let mut v = [0 as u8; 
32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + expiry: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + v: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + r: { + let mut result = [0u8; 32]; + let v = values + .pop() + .expect(INTERNAL_ERR) + .into_fixed_bytes() + .expect(INTERNAL_ERR); + result.copy_from_slice(&v); + result + }, + s: { + let mut result = [0u8; 32]; + let v = values + .pop() + .expect(INTERNAL_ERR) + .into_fixed_bytes() + .expect(INTERNAL_ERR); + result.copy_from_slice(&v); + result + }, + }) + } + pub fn encode(&self) -> Vec { + let data = ethabi::encode( + &[ + ethabi::Token::Address(ethabi::Address::from_slice(&self.token)), + ethabi::Token::Uint( + ethabi::Uint::from_big_endian( + match self.nonce.clone().to_bytes_be() { + (num_bigint::Sign::Plus, bytes) => bytes, + (num_bigint::Sign::NoSign, bytes) => bytes, + (num_bigint::Sign::Minus, _) => { + panic!("negative numbers are not supported") + } + } + .as_slice(), + ), + ), + ethabi::Token::Uint( + ethabi::Uint::from_big_endian( + match self.expiry.clone().to_bytes_be() { + (num_bigint::Sign::Plus, bytes) => bytes, + (num_bigint::Sign::NoSign, bytes) => bytes, + (num_bigint::Sign::Minus, _) => { + panic!("negative numbers are not supported") + } + } + .as_slice(), + ), + ), + ethabi::Token::Uint( + ethabi::Uint::from_big_endian( + match self.v.clone().to_bytes_be() { + (num_bigint::Sign::Plus, bytes) => bytes, + (num_bigint::Sign::NoSign, bytes) => bytes, + (num_bigint::Sign::Minus, _) => { + panic!("negative numbers are not supported") + 
} + } + .as_slice(), + ), + ), + ethabi::Token::FixedBytes(self.r.as_ref().to_vec()), + ethabi::Token::FixedBytes(self.s.as_ref().to_vec()), + ], + ); + let mut encoded = Vec::with_capacity(4 + data.len()); + encoded.extend(Self::METHOD_ID); + encoded.extend(data); + encoded + } + pub fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + match call.input.get(0..4) { + Some(signature) => Self::METHOD_ID == signature, + None => false, + } + } + } + impl substreams_ethereum::Function for SelfPermitAllowed { + const NAME: &'static str = "selfPermitAllowed"; + fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + Self::match_call(call) + } + fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::decode(call) + } + fn encode(&self) -> Vec { + self.encode() + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct SelfPermitAllowedIfNecessary { + pub token: Vec, + pub nonce: substreams::scalar::BigInt, + pub expiry: substreams::scalar::BigInt, + pub v: substreams::scalar::BigInt, + pub r: [u8; 32usize], + pub s: [u8; 32usize], + } + impl SelfPermitAllowedIfNecessary { + const METHOD_ID: [u8; 4] = [164u8, 167u8, 143u8, 12u8]; + pub fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + let maybe_data = call.input.get(4..); + if maybe_data.is_none() { + return Err("no data to decode".to_string()); + } + let mut values = ethabi::decode( + &[ + ethabi::ParamType::Address, + ethabi::ParamType::Uint(256usize), + ethabi::ParamType::Uint(256usize), + ethabi::ParamType::Uint(8usize), + ethabi::ParamType::FixedBytes(32usize), + ethabi::ParamType::FixedBytes(32usize), + ], + maybe_data.unwrap(), + ) + .map_err(|e| format!("unable to decode call.input: {:?}", e))?; + values.reverse(); + Ok(Self { + token: values + .pop() + .expect(INTERNAL_ERR) + .into_address() + .expect(INTERNAL_ERR) + .as_bytes() + .to_vec(), + nonce: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + 
.into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + expiry: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + v: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + r: { + let mut result = [0u8; 32]; + let v = values + .pop() + .expect(INTERNAL_ERR) + .into_fixed_bytes() + .expect(INTERNAL_ERR); + result.copy_from_slice(&v); + result + }, + s: { + let mut result = [0u8; 32]; + let v = values + .pop() + .expect(INTERNAL_ERR) + .into_fixed_bytes() + .expect(INTERNAL_ERR); + result.copy_from_slice(&v); + result + }, + }) + } + pub fn encode(&self) -> Vec { + let data = ethabi::encode( + &[ + ethabi::Token::Address(ethabi::Address::from_slice(&self.token)), + ethabi::Token::Uint( + ethabi::Uint::from_big_endian( + match self.nonce.clone().to_bytes_be() { + (num_bigint::Sign::Plus, bytes) => bytes, + (num_bigint::Sign::NoSign, bytes) => bytes, + (num_bigint::Sign::Minus, _) => { + panic!("negative numbers are not supported") + } + } + .as_slice(), + ), + ), + ethabi::Token::Uint( + ethabi::Uint::from_big_endian( + match self.expiry.clone().to_bytes_be() { + (num_bigint::Sign::Plus, bytes) => bytes, + (num_bigint::Sign::NoSign, bytes) => bytes, + (num_bigint::Sign::Minus, _) => { + panic!("negative numbers are not supported") + } + } + .as_slice(), + ), + ), + ethabi::Token::Uint( + ethabi::Uint::from_big_endian( + match self.v.clone().to_bytes_be() { + (num_bigint::Sign::Plus, bytes) => bytes, + (num_bigint::Sign::NoSign, bytes) => bytes, + (num_bigint::Sign::Minus, _) => { + panic!("negative numbers are not supported") + } + } + .as_slice(), + ), + ), + 
ethabi::Token::FixedBytes(self.r.as_ref().to_vec()), + ethabi::Token::FixedBytes(self.s.as_ref().to_vec()), + ], + ); + let mut encoded = Vec::with_capacity(4 + data.len()); + encoded.extend(Self::METHOD_ID); + encoded.extend(data); + encoded + } + pub fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + match call.input.get(0..4) { + Some(signature) => Self::METHOD_ID == signature, + None => false, + } + } + } + impl substreams_ethereum::Function for SelfPermitAllowedIfNecessary { + const NAME: &'static str = "selfPermitAllowedIfNecessary"; + fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + Self::match_call(call) + } + fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::decode(call) + } + fn encode(&self) -> Vec { + self.encode() + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct SelfPermitIfNecessary { + pub token: Vec, + pub value: substreams::scalar::BigInt, + pub deadline: substreams::scalar::BigInt, + pub v: substreams::scalar::BigInt, + pub r: [u8; 32usize], + pub s: [u8; 32usize], + } + impl SelfPermitIfNecessary { + const METHOD_ID: [u8; 4] = [194u8, 227u8, 20u8, 10u8]; + pub fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + let maybe_data = call.input.get(4..); + if maybe_data.is_none() { + return Err("no data to decode".to_string()); + } + let mut values = ethabi::decode( + &[ + ethabi::ParamType::Address, + ethabi::ParamType::Uint(256usize), + ethabi::ParamType::Uint(256usize), + ethabi::ParamType::Uint(8usize), + ethabi::ParamType::FixedBytes(32usize), + ethabi::ParamType::FixedBytes(32usize), + ], + maybe_data.unwrap(), + ) + .map_err(|e| format!("unable to decode call.input: {:?}", e))?; + values.reverse(); + Ok(Self { + token: values + .pop() + .expect(INTERNAL_ERR) + .into_address() + .expect(INTERNAL_ERR) + .as_bytes() + .to_vec(), + value: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + 
.expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + deadline: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + v: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + r: { + let mut result = [0u8; 32]; + let v = values + .pop() + .expect(INTERNAL_ERR) + .into_fixed_bytes() + .expect(INTERNAL_ERR); + result.copy_from_slice(&v); + result + }, + s: { + let mut result = [0u8; 32]; + let v = values + .pop() + .expect(INTERNAL_ERR) + .into_fixed_bytes() + .expect(INTERNAL_ERR); + result.copy_from_slice(&v); + result + }, + }) + } + pub fn encode(&self) -> Vec { + let data = ethabi::encode( + &[ + ethabi::Token::Address(ethabi::Address::from_slice(&self.token)), + ethabi::Token::Uint( + ethabi::Uint::from_big_endian( + match self.value.clone().to_bytes_be() { + (num_bigint::Sign::Plus, bytes) => bytes, + (num_bigint::Sign::NoSign, bytes) => bytes, + (num_bigint::Sign::Minus, _) => { + panic!("negative numbers are not supported") + } + } + .as_slice(), + ), + ), + ethabi::Token::Uint( + ethabi::Uint::from_big_endian( + match self.deadline.clone().to_bytes_be() { + (num_bigint::Sign::Plus, bytes) => bytes, + (num_bigint::Sign::NoSign, bytes) => bytes, + (num_bigint::Sign::Minus, _) => { + panic!("negative numbers are not supported") + } + } + .as_slice(), + ), + ), + ethabi::Token::Uint( + ethabi::Uint::from_big_endian( + match self.v.clone().to_bytes_be() { + (num_bigint::Sign::Plus, bytes) => bytes, + (num_bigint::Sign::NoSign, bytes) => bytes, + (num_bigint::Sign::Minus, _) => { + panic!("negative numbers are not supported") + } + } + .as_slice(), + ), + ), + 
ethabi::Token::FixedBytes(self.r.as_ref().to_vec()), + ethabi::Token::FixedBytes(self.s.as_ref().to_vec()), + ], + ); + let mut encoded = Vec::with_capacity(4 + data.len()); + encoded.extend(Self::METHOD_ID); + encoded.extend(data); + encoded + } + pub fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + match call.input.get(0..4) { + Some(signature) => Self::METHOD_ID == signature, + None => false, + } + } + } + impl substreams_ethereum::Function for SelfPermitIfNecessary { + const NAME: &'static str = "selfPermitIfNecessary"; + fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + Self::match_call(call) + } + fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::decode(call) + } + fn encode(&self) -> Vec { + self.encode() + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct SetApprovalForAll { + pub operator: Vec, + pub approved: bool, + } + impl SetApprovalForAll { + const METHOD_ID: [u8; 4] = [162u8, 44u8, 180u8, 101u8]; + pub fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + let maybe_data = call.input.get(4..); + if maybe_data.is_none() { + return Err("no data to decode".to_string()); + } + let mut values = ethabi::decode( + &[ethabi::ParamType::Address, ethabi::ParamType::Bool], + maybe_data.unwrap(), + ) + .map_err(|e| format!("unable to decode call.input: {:?}", e))?; + values.reverse(); + Ok(Self { + operator: values + .pop() + .expect(INTERNAL_ERR) + .into_address() + .expect(INTERNAL_ERR) + .as_bytes() + .to_vec(), + approved: values + .pop() + .expect(INTERNAL_ERR) + .into_bool() + .expect(INTERNAL_ERR), + }) + } + pub fn encode(&self) -> Vec { + let data = ethabi::encode( + &[ + ethabi::Token::Address( + ethabi::Address::from_slice(&self.operator), + ), + ethabi::Token::Bool(self.approved.clone()), + ], + ); + let mut encoded = Vec::with_capacity(4 + data.len()); + encoded.extend(Self::METHOD_ID); + encoded.extend(data); + encoded + } + pub fn 
match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + match call.input.get(0..4) { + Some(signature) => Self::METHOD_ID == signature, + None => false, + } + } + } + impl substreams_ethereum::Function for SetApprovalForAll { + const NAME: &'static str = "setApprovalForAll"; + fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + Self::match_call(call) + } + fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::decode(call) + } + fn encode(&self) -> Vec { + self.encode() + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct SupportsInterface { + pub interface_id: [u8; 4usize], + } + impl SupportsInterface { + const METHOD_ID: [u8; 4] = [1u8, 255u8, 201u8, 167u8]; + pub fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + let maybe_data = call.input.get(4..); + if maybe_data.is_none() { + return Err("no data to decode".to_string()); + } + let mut values = ethabi::decode( + &[ethabi::ParamType::FixedBytes(4usize)], + maybe_data.unwrap(), + ) + .map_err(|e| format!("unable to decode call.input: {:?}", e))?; + values.reverse(); + Ok(Self { + interface_id: { + let mut result = [0u8; 4]; + let v = values + .pop() + .expect(INTERNAL_ERR) + .into_fixed_bytes() + .expect(INTERNAL_ERR); + result.copy_from_slice(&v); + result + }, + }) + } + pub fn encode(&self) -> Vec { + let data = ethabi::encode( + &[ethabi::Token::FixedBytes(self.interface_id.as_ref().to_vec())], + ); + let mut encoded = Vec::with_capacity(4 + data.len()); + encoded.extend(Self::METHOD_ID); + encoded.extend(data); + encoded + } + pub fn output_call( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::output(call.return_data.as_ref()) + } + pub fn output(data: &[u8]) -> Result { + let mut values = ethabi::decode( + &[ethabi::ParamType::Bool], + data.as_ref(), + ) + .map_err(|e| format!("unable to decode output data: {:?}", e))?; + Ok( + values + .pop() + .expect("one output data should have 
existed") + .into_bool() + .expect(INTERNAL_ERR), + ) + } + pub fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + match call.input.get(0..4) { + Some(signature) => Self::METHOD_ID == signature, + None => false, + } + } + pub fn call(&self, address: Vec) -> Option { + use substreams_ethereum::pb::eth::rpc; + let rpc_calls = rpc::RpcCalls { + calls: vec![ + rpc::RpcCall { to_addr : address, data : self.encode(), } + ], + }; + let responses = substreams_ethereum::rpc::eth_call(&rpc_calls).responses; + let response = responses + .get(0) + .expect("one response should have existed"); + if response.failed { + return None; + } + match Self::output(response.raw.as_ref()) { + Ok(data) => Some(data), + Err(err) => { + use substreams_ethereum::Function; + substreams::log::info!( + "Call output for function `{}` failed to decode with error: {}", + Self::NAME, err + ); + None + } + } + } + } + impl substreams_ethereum::Function for SupportsInterface { + const NAME: &'static str = "supportsInterface"; + fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + Self::match_call(call) + } + fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::decode(call) + } + fn encode(&self) -> Vec { + self.encode() + } + } + impl substreams_ethereum::rpc::RPCDecodable for SupportsInterface { + fn output(data: &[u8]) -> Result { + Self::output(data) + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct SweepToken { + pub token: Vec, + pub amount_minimum: substreams::scalar::BigInt, + pub recipient: Vec, + } + impl SweepToken { + const METHOD_ID: [u8; 4] = [223u8, 42u8, 181u8, 187u8]; + pub fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + let maybe_data = call.input.get(4..); + if maybe_data.is_none() { + return Err("no data to decode".to_string()); + } + let mut values = ethabi::decode( + &[ + ethabi::ParamType::Address, + ethabi::ParamType::Uint(256usize), + ethabi::ParamType::Address, + ], + 
maybe_data.unwrap(), + ) + .map_err(|e| format!("unable to decode call.input: {:?}", e))?; + values.reverse(); + Ok(Self { + token: values + .pop() + .expect(INTERNAL_ERR) + .into_address() + .expect(INTERNAL_ERR) + .as_bytes() + .to_vec(), + amount_minimum: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + recipient: values + .pop() + .expect(INTERNAL_ERR) + .into_address() + .expect(INTERNAL_ERR) + .as_bytes() + .to_vec(), + }) + } + pub fn encode(&self) -> Vec { + let data = ethabi::encode( + &[ + ethabi::Token::Address(ethabi::Address::from_slice(&self.token)), + ethabi::Token::Uint( + ethabi::Uint::from_big_endian( + match self.amount_minimum.clone().to_bytes_be() { + (num_bigint::Sign::Plus, bytes) => bytes, + (num_bigint::Sign::NoSign, bytes) => bytes, + (num_bigint::Sign::Minus, _) => { + panic!("negative numbers are not supported") + } + } + .as_slice(), + ), + ), + ethabi::Token::Address( + ethabi::Address::from_slice(&self.recipient), + ), + ], + ); + let mut encoded = Vec::with_capacity(4 + data.len()); + encoded.extend(Self::METHOD_ID); + encoded.extend(data); + encoded + } + pub fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + match call.input.get(0..4) { + Some(signature) => Self::METHOD_ID == signature, + None => false, + } + } + } + impl substreams_ethereum::Function for SweepToken { + const NAME: &'static str = "sweepToken"; + fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + Self::match_call(call) + } + fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::decode(call) + } + fn encode(&self) -> Vec { + self.encode() + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct Symbol {} + impl Symbol { + const METHOD_ID: [u8; 4] = [149u8, 216u8, 155u8, 65u8]; + pub fn decode( + call: 
&substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Ok(Self {}) + } + pub fn encode(&self) -> Vec { + let data = ethabi::encode(&[]); + let mut encoded = Vec::with_capacity(4 + data.len()); + encoded.extend(Self::METHOD_ID); + encoded.extend(data); + encoded + } + pub fn output_call( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::output(call.return_data.as_ref()) + } + pub fn output(data: &[u8]) -> Result { + let mut values = ethabi::decode( + &[ethabi::ParamType::String], + data.as_ref(), + ) + .map_err(|e| format!("unable to decode output data: {:?}", e))?; + Ok( + values + .pop() + .expect("one output data should have existed") + .into_string() + .expect(INTERNAL_ERR), + ) + } + pub fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + match call.input.get(0..4) { + Some(signature) => Self::METHOD_ID == signature, + None => false, + } + } + pub fn call(&self, address: Vec) -> Option { + use substreams_ethereum::pb::eth::rpc; + let rpc_calls = rpc::RpcCalls { + calls: vec![ + rpc::RpcCall { to_addr : address, data : self.encode(), } + ], + }; + let responses = substreams_ethereum::rpc::eth_call(&rpc_calls).responses; + let response = responses + .get(0) + .expect("one response should have existed"); + if response.failed { + return None; + } + match Self::output(response.raw.as_ref()) { + Ok(data) => Some(data), + Err(err) => { + use substreams_ethereum::Function; + substreams::log::info!( + "Call output for function `{}` failed to decode with error: {}", + Self::NAME, err + ); + None + } + } + } + } + impl substreams_ethereum::Function for Symbol { + const NAME: &'static str = "symbol"; + fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + Self::match_call(call) + } + fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::decode(call) + } + fn encode(&self) -> Vec { + self.encode() + } + } + impl substreams_ethereum::rpc::RPCDecodable for Symbol { + fn 
output(data: &[u8]) -> Result { + Self::output(data) + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct TokenByIndex { + pub index: substreams::scalar::BigInt, + } + impl TokenByIndex { + const METHOD_ID: [u8; 4] = [79u8, 108u8, 204u8, 231u8]; + pub fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + let maybe_data = call.input.get(4..); + if maybe_data.is_none() { + return Err("no data to decode".to_string()); + } + let mut values = ethabi::decode( + &[ethabi::ParamType::Uint(256usize)], + maybe_data.unwrap(), + ) + .map_err(|e| format!("unable to decode call.input: {:?}", e))?; + values.reverse(); + Ok(Self { + index: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + }) + } + pub fn encode(&self) -> Vec { + let data = ethabi::encode( + &[ + ethabi::Token::Uint( + ethabi::Uint::from_big_endian( + match self.index.clone().to_bytes_be() { + (num_bigint::Sign::Plus, bytes) => bytes, + (num_bigint::Sign::NoSign, bytes) => bytes, + (num_bigint::Sign::Minus, _) => { + panic!("negative numbers are not supported") + } + } + .as_slice(), + ), + ), + ], + ); + let mut encoded = Vec::with_capacity(4 + data.len()); + encoded.extend(Self::METHOD_ID); + encoded.extend(data); + encoded + } + pub fn output_call( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::output(call.return_data.as_ref()) + } + pub fn output(data: &[u8]) -> Result { + let mut values = ethabi::decode( + &[ethabi::ParamType::Uint(256usize)], + data.as_ref(), + ) + .map_err(|e| format!("unable to decode output data: {:?}", e))?; + Ok({ + let mut v = [0 as u8; 32]; + values + .pop() + .expect("one output data should have existed") + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }) + } + pub fn match_call(call: 
&substreams_ethereum::pb::eth::v2::Call) -> bool { + match call.input.get(0..4) { + Some(signature) => Self::METHOD_ID == signature, + None => false, + } + } + pub fn call(&self, address: Vec) -> Option { + use substreams_ethereum::pb::eth::rpc; + let rpc_calls = rpc::RpcCalls { + calls: vec![ + rpc::RpcCall { to_addr : address, data : self.encode(), } + ], + }; + let responses = substreams_ethereum::rpc::eth_call(&rpc_calls).responses; + let response = responses + .get(0) + .expect("one response should have existed"); + if response.failed { + return None; + } + match Self::output(response.raw.as_ref()) { + Ok(data) => Some(data), + Err(err) => { + use substreams_ethereum::Function; + substreams::log::info!( + "Call output for function `{}` failed to decode with error: {}", + Self::NAME, err + ); + None + } + } + } + } + impl substreams_ethereum::Function for TokenByIndex { + const NAME: &'static str = "tokenByIndex"; + fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + Self::match_call(call) + } + fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::decode(call) + } + fn encode(&self) -> Vec { + self.encode() + } + } + impl substreams_ethereum::rpc::RPCDecodable + for TokenByIndex { + fn output(data: &[u8]) -> Result { + Self::output(data) + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct TokenOfOwnerByIndex { + pub owner: Vec, + pub index: substreams::scalar::BigInt, + } + impl TokenOfOwnerByIndex { + const METHOD_ID: [u8; 4] = [47u8, 116u8, 92u8, 89u8]; + pub fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + let maybe_data = call.input.get(4..); + if maybe_data.is_none() { + return Err("no data to decode".to_string()); + } + let mut values = ethabi::decode( + &[ethabi::ParamType::Address, ethabi::ParamType::Uint(256usize)], + maybe_data.unwrap(), + ) + .map_err(|e| format!("unable to decode call.input: {:?}", e))?; + values.reverse(); + Ok(Self { + owner: values + .pop() + 
.expect(INTERNAL_ERR) + .into_address() + .expect(INTERNAL_ERR) + .as_bytes() + .to_vec(), + index: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + }) + } + pub fn encode(&self) -> Vec { + let data = ethabi::encode( + &[ + ethabi::Token::Address(ethabi::Address::from_slice(&self.owner)), + ethabi::Token::Uint( + ethabi::Uint::from_big_endian( + match self.index.clone().to_bytes_be() { + (num_bigint::Sign::Plus, bytes) => bytes, + (num_bigint::Sign::NoSign, bytes) => bytes, + (num_bigint::Sign::Minus, _) => { + panic!("negative numbers are not supported") + } + } + .as_slice(), + ), + ), + ], + ); + let mut encoded = Vec::with_capacity(4 + data.len()); + encoded.extend(Self::METHOD_ID); + encoded.extend(data); + encoded + } + pub fn output_call( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::output(call.return_data.as_ref()) + } + pub fn output(data: &[u8]) -> Result { + let mut values = ethabi::decode( + &[ethabi::ParamType::Uint(256usize)], + data.as_ref(), + ) + .map_err(|e| format!("unable to decode output data: {:?}", e))?; + Ok({ + let mut v = [0 as u8; 32]; + values + .pop() + .expect("one output data should have existed") + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }) + } + pub fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + match call.input.get(0..4) { + Some(signature) => Self::METHOD_ID == signature, + None => false, + } + } + pub fn call(&self, address: Vec) -> Option { + use substreams_ethereum::pb::eth::rpc; + let rpc_calls = rpc::RpcCalls { + calls: vec![ + rpc::RpcCall { to_addr : address, data : self.encode(), } + ], + }; + let responses = substreams_ethereum::rpc::eth_call(&rpc_calls).responses; + let response = responses + .get(0) + .expect("one 
response should have existed"); + if response.failed { + return None; + } + match Self::output(response.raw.as_ref()) { + Ok(data) => Some(data), + Err(err) => { + use substreams_ethereum::Function; + substreams::log::info!( + "Call output for function `{}` failed to decode with error: {}", + Self::NAME, err + ); + None + } + } + } + } + impl substreams_ethereum::Function for TokenOfOwnerByIndex { + const NAME: &'static str = "tokenOfOwnerByIndex"; + fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + Self::match_call(call) + } + fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::decode(call) + } + fn encode(&self) -> Vec { + self.encode() + } + } + impl substreams_ethereum::rpc::RPCDecodable + for TokenOfOwnerByIndex { + fn output(data: &[u8]) -> Result { + Self::output(data) + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct TokenUri { + pub token_id: substreams::scalar::BigInt, + } + impl TokenUri { + const METHOD_ID: [u8; 4] = [200u8, 123u8, 86u8, 221u8]; + pub fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + let maybe_data = call.input.get(4..); + if maybe_data.is_none() { + return Err("no data to decode".to_string()); + } + let mut values = ethabi::decode( + &[ethabi::ParamType::Uint(256usize)], + maybe_data.unwrap(), + ) + .map_err(|e| format!("unable to decode call.input: {:?}", e))?; + values.reverse(); + Ok(Self { + token_id: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + }) + } + pub fn encode(&self) -> Vec { + let data = ethabi::encode( + &[ + ethabi::Token::Uint( + ethabi::Uint::from_big_endian( + match self.token_id.clone().to_bytes_be() { + (num_bigint::Sign::Plus, bytes) => bytes, + (num_bigint::Sign::NoSign, bytes) => bytes, + (num_bigint::Sign::Minus, _) => { + panic!("negative numbers are not 
supported") + } + } + .as_slice(), + ), + ), + ], + ); + let mut encoded = Vec::with_capacity(4 + data.len()); + encoded.extend(Self::METHOD_ID); + encoded.extend(data); + encoded + } + pub fn output_call( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::output(call.return_data.as_ref()) + } + pub fn output(data: &[u8]) -> Result { + let mut values = ethabi::decode( + &[ethabi::ParamType::String], + data.as_ref(), + ) + .map_err(|e| format!("unable to decode output data: {:?}", e))?; + Ok( + values + .pop() + .expect("one output data should have existed") + .into_string() + .expect(INTERNAL_ERR), + ) + } + pub fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + match call.input.get(0..4) { + Some(signature) => Self::METHOD_ID == signature, + None => false, + } + } + pub fn call(&self, address: Vec) -> Option { + use substreams_ethereum::pb::eth::rpc; + let rpc_calls = rpc::RpcCalls { + calls: vec![ + rpc::RpcCall { to_addr : address, data : self.encode(), } + ], + }; + let responses = substreams_ethereum::rpc::eth_call(&rpc_calls).responses; + let response = responses + .get(0) + .expect("one response should have existed"); + if response.failed { + return None; + } + match Self::output(response.raw.as_ref()) { + Ok(data) => Some(data), + Err(err) => { + use substreams_ethereum::Function; + substreams::log::info!( + "Call output for function `{}` failed to decode with error: {}", + Self::NAME, err + ); + None + } + } + } + } + impl substreams_ethereum::Function for TokenUri { + const NAME: &'static str = "tokenURI"; + fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + Self::match_call(call) + } + fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::decode(call) + } + fn encode(&self) -> Vec { + self.encode() + } + } + impl substreams_ethereum::rpc::RPCDecodable for TokenUri { + fn output(data: &[u8]) -> Result { + Self::output(data) + } + } + #[derive(Debug, Clone, 
PartialEq)] + pub struct TotalSupply {} + impl TotalSupply { + const METHOD_ID: [u8; 4] = [24u8, 22u8, 13u8, 221u8]; + pub fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Ok(Self {}) + } + pub fn encode(&self) -> Vec { + let data = ethabi::encode(&[]); + let mut encoded = Vec::with_capacity(4 + data.len()); + encoded.extend(Self::METHOD_ID); + encoded.extend(data); + encoded + } + pub fn output_call( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::output(call.return_data.as_ref()) + } + pub fn output(data: &[u8]) -> Result { + let mut values = ethabi::decode( + &[ethabi::ParamType::Uint(256usize)], + data.as_ref(), + ) + .map_err(|e| format!("unable to decode output data: {:?}", e))?; + Ok({ + let mut v = [0 as u8; 32]; + values + .pop() + .expect("one output data should have existed") + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }) + } + pub fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + match call.input.get(0..4) { + Some(signature) => Self::METHOD_ID == signature, + None => false, + } + } + pub fn call(&self, address: Vec) -> Option { + use substreams_ethereum::pb::eth::rpc; + let rpc_calls = rpc::RpcCalls { + calls: vec![ + rpc::RpcCall { to_addr : address, data : self.encode(), } + ], + }; + let responses = substreams_ethereum::rpc::eth_call(&rpc_calls).responses; + let response = responses + .get(0) + .expect("one response should have existed"); + if response.failed { + return None; + } + match Self::output(response.raw.as_ref()) { + Ok(data) => Some(data), + Err(err) => { + use substreams_ethereum::Function; + substreams::log::info!( + "Call output for function `{}` failed to decode with error: {}", + Self::NAME, err + ); + None + } + } + } + } + impl substreams_ethereum::Function for TotalSupply { + const NAME: &'static str = "totalSupply"; + fn match_call(call: 
&substreams_ethereum::pb::eth::v2::Call) -> bool { + Self::match_call(call) + } + fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::decode(call) + } + fn encode(&self) -> Vec { + self.encode() + } + } + impl substreams_ethereum::rpc::RPCDecodable + for TotalSupply { + fn output(data: &[u8]) -> Result { + Self::output(data) + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct TransferFrom { + pub from: Vec, + pub to: Vec, + pub token_id: substreams::scalar::BigInt, + } + impl TransferFrom { + const METHOD_ID: [u8; 4] = [35u8, 184u8, 114u8, 221u8]; + pub fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + let maybe_data = call.input.get(4..); + if maybe_data.is_none() { + return Err("no data to decode".to_string()); + } + let mut values = ethabi::decode( + &[ + ethabi::ParamType::Address, + ethabi::ParamType::Address, + ethabi::ParamType::Uint(256usize), + ], + maybe_data.unwrap(), + ) + .map_err(|e| format!("unable to decode call.input: {:?}", e))?; + values.reverse(); + Ok(Self { + from: values + .pop() + .expect(INTERNAL_ERR) + .into_address() + .expect(INTERNAL_ERR) + .as_bytes() + .to_vec(), + to: values + .pop() + .expect(INTERNAL_ERR) + .into_address() + .expect(INTERNAL_ERR) + .as_bytes() + .to_vec(), + token_id: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + }) + } + pub fn encode(&self) -> Vec { + let data = ethabi::encode( + &[ + ethabi::Token::Address(ethabi::Address::from_slice(&self.from)), + ethabi::Token::Address(ethabi::Address::from_slice(&self.to)), + ethabi::Token::Uint( + ethabi::Uint::from_big_endian( + match self.token_id.clone().to_bytes_be() { + (num_bigint::Sign::Plus, bytes) => bytes, + (num_bigint::Sign::NoSign, bytes) => bytes, + (num_bigint::Sign::Minus, _) => { + panic!("negative numbers are not supported") + } 
+ } + .as_slice(), + ), + ), + ], + ); + let mut encoded = Vec::with_capacity(4 + data.len()); + encoded.extend(Self::METHOD_ID); + encoded.extend(data); + encoded + } + pub fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + match call.input.get(0..4) { + Some(signature) => Self::METHOD_ID == signature, + None => false, + } + } + } + impl substreams_ethereum::Function for TransferFrom { + const NAME: &'static str = "transferFrom"; + fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + Self::match_call(call) + } + fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::decode(call) + } + fn encode(&self) -> Vec { + self.encode() + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct UniswapV3MintCallback { + pub amount0_owed: substreams::scalar::BigInt, + pub amount1_owed: substreams::scalar::BigInt, + pub data: Vec, + } + impl UniswapV3MintCallback { + const METHOD_ID: [u8; 4] = [211u8, 72u8, 121u8, 151u8]; + pub fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + let maybe_data = call.input.get(4..); + if maybe_data.is_none() { + return Err("no data to decode".to_string()); + } + let mut values = ethabi::decode( + &[ + ethabi::ParamType::Uint(256usize), + ethabi::ParamType::Uint(256usize), + ethabi::ParamType::Bytes, + ], + maybe_data.unwrap(), + ) + .map_err(|e| format!("unable to decode call.input: {:?}", e))?; + values.reverse(); + Ok(Self { + amount0_owed: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + amount1_owed: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + data: values + .pop() + .expect(INTERNAL_ERR) + .into_bytes() + 
.expect(INTERNAL_ERR), + }) + } + pub fn encode(&self) -> Vec { + let data = ethabi::encode( + &[ + ethabi::Token::Uint( + ethabi::Uint::from_big_endian( + match self.amount0_owed.clone().to_bytes_be() { + (num_bigint::Sign::Plus, bytes) => bytes, + (num_bigint::Sign::NoSign, bytes) => bytes, + (num_bigint::Sign::Minus, _) => { + panic!("negative numbers are not supported") + } + } + .as_slice(), + ), + ), + ethabi::Token::Uint( + ethabi::Uint::from_big_endian( + match self.amount1_owed.clone().to_bytes_be() { + (num_bigint::Sign::Plus, bytes) => bytes, + (num_bigint::Sign::NoSign, bytes) => bytes, + (num_bigint::Sign::Minus, _) => { + panic!("negative numbers are not supported") + } + } + .as_slice(), + ), + ), + ethabi::Token::Bytes(self.data.clone()), + ], + ); + let mut encoded = Vec::with_capacity(4 + data.len()); + encoded.extend(Self::METHOD_ID); + encoded.extend(data); + encoded + } + pub fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + match call.input.get(0..4) { + Some(signature) => Self::METHOD_ID == signature, + None => false, + } + } + } + impl substreams_ethereum::Function for UniswapV3MintCallback { + const NAME: &'static str = "uniswapV3MintCallback"; + fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + Self::match_call(call) + } + fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::decode(call) + } + fn encode(&self) -> Vec { + self.encode() + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct UnwrapWeth9 { + pub amount_minimum: substreams::scalar::BigInt, + pub recipient: Vec, + } + impl UnwrapWeth9 { + const METHOD_ID: [u8; 4] = [73u8, 64u8, 75u8, 124u8]; + pub fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + let maybe_data = call.input.get(4..); + if maybe_data.is_none() { + return Err("no data to decode".to_string()); + } + let mut values = ethabi::decode( + &[ethabi::ParamType::Uint(256usize), ethabi::ParamType::Address], + 
maybe_data.unwrap(), + ) + .map_err(|e| format!("unable to decode call.input: {:?}", e))?; + values.reverse(); + Ok(Self { + amount_minimum: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + recipient: values + .pop() + .expect(INTERNAL_ERR) + .into_address() + .expect(INTERNAL_ERR) + .as_bytes() + .to_vec(), + }) + } + pub fn encode(&self) -> Vec { + let data = ethabi::encode( + &[ + ethabi::Token::Uint( + ethabi::Uint::from_big_endian( + match self.amount_minimum.clone().to_bytes_be() { + (num_bigint::Sign::Plus, bytes) => bytes, + (num_bigint::Sign::NoSign, bytes) => bytes, + (num_bigint::Sign::Minus, _) => { + panic!("negative numbers are not supported") + } + } + .as_slice(), + ), + ), + ethabi::Token::Address( + ethabi::Address::from_slice(&self.recipient), + ), + ], + ); + let mut encoded = Vec::with_capacity(4 + data.len()); + encoded.extend(Self::METHOD_ID); + encoded.extend(data); + encoded + } + pub fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + match call.input.get(0..4) { + Some(signature) => Self::METHOD_ID == signature, + None => false, + } + } + } + impl substreams_ethereum::Function for UnwrapWeth9 { + const NAME: &'static str = "unwrapWETH9"; + fn match_call(call: &substreams_ethereum::pb::eth::v2::Call) -> bool { + Self::match_call(call) + } + fn decode( + call: &substreams_ethereum::pb::eth::v2::Call, + ) -> Result { + Self::decode(call) + } + fn encode(&self) -> Vec { + self.encode() + } + } + } + /// Contract's events. 
+ #[allow(dead_code, unused_imports, unused_variables)] + pub mod events { + use super::INTERNAL_ERR; + #[derive(Debug, Clone, PartialEq)] + pub struct Approval { + pub owner: Vec, + pub approved: Vec, + pub token_id: substreams::scalar::BigInt, + } + impl Approval { + const TOPIC_ID: [u8; 32] = [ + 140u8, + 91u8, + 225u8, + 229u8, + 235u8, + 236u8, + 125u8, + 91u8, + 209u8, + 79u8, + 113u8, + 66u8, + 125u8, + 30u8, + 132u8, + 243u8, + 221u8, + 3u8, + 20u8, + 192u8, + 247u8, + 178u8, + 41u8, + 30u8, + 91u8, + 32u8, + 10u8, + 200u8, + 199u8, + 195u8, + 185u8, + 37u8, + ]; + pub fn match_log(log: &substreams_ethereum::pb::eth::v2::Log) -> bool { + if log.topics.len() != 4usize { + return false; + } + if log.data.len() != 0usize { + return false; + } + return log.topics.get(0).expect("bounds already checked").as_ref() + == Self::TOPIC_ID; + } + pub fn decode( + log: &substreams_ethereum::pb::eth::v2::Log, + ) -> Result { + Ok(Self { + owner: ethabi::decode( + &[ethabi::ParamType::Address], + log.topics[1usize].as_ref(), + ) + .map_err(|e| format!( + "unable to decode param 'owner' from topic of type 'address': {:?}", + e + ))? + .pop() + .expect(INTERNAL_ERR) + .into_address() + .expect(INTERNAL_ERR) + .as_bytes() + .to_vec(), + approved: ethabi::decode( + &[ethabi::ParamType::Address], + log.topics[2usize].as_ref(), + ) + .map_err(|e| format!( + "unable to decode param 'approved' from topic of type 'address': {:?}", + e + ))? + .pop() + .expect(INTERNAL_ERR) + .into_address() + .expect(INTERNAL_ERR) + .as_bytes() + .to_vec(), + token_id: { + let mut v = [0 as u8; 32]; + ethabi::decode( + &[ethabi::ParamType::Uint(256usize)], + log.topics[3usize].as_ref(), + ) + .map_err(|e| format!( + "unable to decode param 'token_id' from topic of type 'uint256': {:?}", + e + ))? 
+ .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + }) + } + } + impl substreams_ethereum::Event for Approval { + const NAME: &'static str = "Approval"; + fn match_log(log: &substreams_ethereum::pb::eth::v2::Log) -> bool { + Self::match_log(log) + } + fn decode( + log: &substreams_ethereum::pb::eth::v2::Log, + ) -> Result { + Self::decode(log) + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct ApprovalForAll { + pub owner: Vec, + pub operator: Vec, + pub approved: bool, + } + impl ApprovalForAll { + const TOPIC_ID: [u8; 32] = [ + 23u8, + 48u8, + 126u8, + 171u8, + 57u8, + 171u8, + 97u8, + 7u8, + 232u8, + 137u8, + 152u8, + 69u8, + 173u8, + 61u8, + 89u8, + 189u8, + 150u8, + 83u8, + 242u8, + 0u8, + 242u8, + 32u8, + 146u8, + 4u8, + 137u8, + 202u8, + 43u8, + 89u8, + 55u8, + 105u8, + 108u8, + 49u8, + ]; + pub fn match_log(log: &substreams_ethereum::pb::eth::v2::Log) -> bool { + if log.topics.len() != 3usize { + return false; + } + if log.data.len() != 32usize { + return false; + } + return log.topics.get(0).expect("bounds already checked").as_ref() + == Self::TOPIC_ID; + } + pub fn decode( + log: &substreams_ethereum::pb::eth::v2::Log, + ) -> Result { + let mut values = ethabi::decode( + &[ethabi::ParamType::Bool], + log.data.as_ref(), + ) + .map_err(|e| format!("unable to decode log.data: {:?}", e))?; + values.reverse(); + Ok(Self { + owner: ethabi::decode( + &[ethabi::ParamType::Address], + log.topics[1usize].as_ref(), + ) + .map_err(|e| format!( + "unable to decode param 'owner' from topic of type 'address': {:?}", + e + ))? + .pop() + .expect(INTERNAL_ERR) + .into_address() + .expect(INTERNAL_ERR) + .as_bytes() + .to_vec(), + operator: ethabi::decode( + &[ethabi::ParamType::Address], + log.topics[2usize].as_ref(), + ) + .map_err(|e| format!( + "unable to decode param 'operator' from topic of type 'address': {:?}", + e + ))? 
+ .pop() + .expect(INTERNAL_ERR) + .into_address() + .expect(INTERNAL_ERR) + .as_bytes() + .to_vec(), + approved: values + .pop() + .expect(INTERNAL_ERR) + .into_bool() + .expect(INTERNAL_ERR), + }) + } + } + impl substreams_ethereum::Event for ApprovalForAll { + const NAME: &'static str = "ApprovalForAll"; + fn match_log(log: &substreams_ethereum::pb::eth::v2::Log) -> bool { + Self::match_log(log) + } + fn decode( + log: &substreams_ethereum::pb::eth::v2::Log, + ) -> Result { + Self::decode(log) + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct Collect { + pub token_id: substreams::scalar::BigInt, + pub recipient: Vec, + pub amount0: substreams::scalar::BigInt, + pub amount1: substreams::scalar::BigInt, + } + impl Collect { + const TOPIC_ID: [u8; 32] = [ + 64u8, + 208u8, + 239u8, + 209u8, + 165u8, + 61u8, + 96u8, + 236u8, + 191u8, + 64u8, + 151u8, + 27u8, + 157u8, + 175u8, + 125u8, + 201u8, + 1u8, + 120u8, + 195u8, + 170u8, + 220u8, + 122u8, + 171u8, + 23u8, + 101u8, + 99u8, + 39u8, + 56u8, + 250u8, + 139u8, + 143u8, + 1u8, + ]; + pub fn match_log(log: &substreams_ethereum::pb::eth::v2::Log) -> bool { + if log.topics.len() != 2usize { + return false; + } + if log.data.len() != 96usize { + return false; + } + return log.topics.get(0).expect("bounds already checked").as_ref() + == Self::TOPIC_ID; + } + pub fn decode( + log: &substreams_ethereum::pb::eth::v2::Log, + ) -> Result { + let mut values = ethabi::decode( + &[ + ethabi::ParamType::Address, + ethabi::ParamType::Uint(256usize), + ethabi::ParamType::Uint(256usize), + ], + log.data.as_ref(), + ) + .map_err(|e| format!("unable to decode log.data: {:?}", e))?; + values.reverse(); + Ok(Self { + token_id: { + let mut v = [0 as u8; 32]; + ethabi::decode( + &[ethabi::ParamType::Uint(256usize)], + log.topics[1usize].as_ref(), + ) + .map_err(|e| format!( + "unable to decode param 'token_id' from topic of type 'uint256': {:?}", + e + ))? 
+ .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + recipient: values + .pop() + .expect(INTERNAL_ERR) + .into_address() + .expect(INTERNAL_ERR) + .as_bytes() + .to_vec(), + amount0: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + amount1: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + }) + } + } + impl substreams_ethereum::Event for Collect { + const NAME: &'static str = "Collect"; + fn match_log(log: &substreams_ethereum::pb::eth::v2::Log) -> bool { + Self::match_log(log) + } + fn decode( + log: &substreams_ethereum::pb::eth::v2::Log, + ) -> Result { + Self::decode(log) + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct DecreaseLiquidity { + pub token_id: substreams::scalar::BigInt, + pub liquidity: substreams::scalar::BigInt, + pub amount0: substreams::scalar::BigInt, + pub amount1: substreams::scalar::BigInt, + } + impl DecreaseLiquidity { + const TOPIC_ID: [u8; 32] = [ + 38u8, + 246u8, + 160u8, + 72u8, + 238u8, + 145u8, + 56u8, + 242u8, + 192u8, + 206u8, + 38u8, + 111u8, + 50u8, + 44u8, + 185u8, + 146u8, + 40u8, + 232u8, + 214u8, + 25u8, + 174u8, + 43u8, + 255u8, + 48u8, + 198u8, + 127u8, + 141u8, + 207u8, + 157u8, + 35u8, + 119u8, + 180u8, + ]; + pub fn match_log(log: &substreams_ethereum::pb::eth::v2::Log) -> bool { + if log.topics.len() != 2usize { + return false; + } + if log.data.len() != 96usize { + return false; + } + return log.topics.get(0).expect("bounds already checked").as_ref() + == Self::TOPIC_ID; + } + pub fn decode( + log: &substreams_ethereum::pb::eth::v2::Log, + ) -> Result { + let mut values = 
ethabi::decode( + &[ + ethabi::ParamType::Uint(128usize), + ethabi::ParamType::Uint(256usize), + ethabi::ParamType::Uint(256usize), + ], + log.data.as_ref(), + ) + .map_err(|e| format!("unable to decode log.data: {:?}", e))?; + values.reverse(); + Ok(Self { + token_id: { + let mut v = [0 as u8; 32]; + ethabi::decode( + &[ethabi::ParamType::Uint(256usize)], + log.topics[1usize].as_ref(), + ) + .map_err(|e| format!( + "unable to decode param 'token_id' from topic of type 'uint256': {:?}", + e + ))? + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + liquidity: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + amount0: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + amount1: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + }) + } + } + impl substreams_ethereum::Event for DecreaseLiquidity { + const NAME: &'static str = "DecreaseLiquidity"; + fn match_log(log: &substreams_ethereum::pb::eth::v2::Log) -> bool { + Self::match_log(log) + } + fn decode( + log: &substreams_ethereum::pb::eth::v2::Log, + ) -> Result { + Self::decode(log) + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct IncreaseLiquidity { + pub token_id: substreams::scalar::BigInt, + pub liquidity: substreams::scalar::BigInt, + pub amount0: substreams::scalar::BigInt, + pub amount1: substreams::scalar::BigInt, + } + impl IncreaseLiquidity { + const TOPIC_ID: [u8; 32] = [ + 48u8, + 103u8, + 4u8, + 139u8, 
+ 238u8, + 227u8, + 27u8, + 37u8, + 178u8, + 241u8, + 104u8, + 31u8, + 136u8, + 218u8, + 200u8, + 56u8, + 200u8, + 187u8, + 163u8, + 106u8, + 242u8, + 91u8, + 251u8, + 43u8, + 124u8, + 247u8, + 71u8, + 58u8, + 88u8, + 71u8, + 227u8, + 95u8, + ]; + pub fn match_log(log: &substreams_ethereum::pb::eth::v2::Log) -> bool { + if log.topics.len() != 2usize { + return false; + } + if log.data.len() != 96usize { + return false; + } + return log.topics.get(0).expect("bounds already checked").as_ref() + == Self::TOPIC_ID; + } + pub fn decode( + log: &substreams_ethereum::pb::eth::v2::Log, + ) -> Result { + let mut values = ethabi::decode( + &[ + ethabi::ParamType::Uint(128usize), + ethabi::ParamType::Uint(256usize), + ethabi::ParamType::Uint(256usize), + ], + log.data.as_ref(), + ) + .map_err(|e| format!("unable to decode log.data: {:?}", e))?; + values.reverse(); + Ok(Self { + token_id: { + let mut v = [0 as u8; 32]; + ethabi::decode( + &[ethabi::ParamType::Uint(256usize)], + log.topics[1usize].as_ref(), + ) + .map_err(|e| format!( + "unable to decode param 'token_id' from topic of type 'uint256': {:?}", + e + ))? 
+ .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + liquidity: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + amount0: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + amount1: { + let mut v = [0 as u8; 32]; + values + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + }) + } + } + impl substreams_ethereum::Event for IncreaseLiquidity { + const NAME: &'static str = "IncreaseLiquidity"; + fn match_log(log: &substreams_ethereum::pb::eth::v2::Log) -> bool { + Self::match_log(log) + } + fn decode( + log: &substreams_ethereum::pb::eth::v2::Log, + ) -> Result { + Self::decode(log) + } + } + #[derive(Debug, Clone, PartialEq)] + pub struct Transfer { + pub from: Vec, + pub to: Vec, + pub token_id: substreams::scalar::BigInt, + } + impl Transfer { + const TOPIC_ID: [u8; 32] = [ + 221u8, + 242u8, + 82u8, + 173u8, + 27u8, + 226u8, + 200u8, + 155u8, + 105u8, + 194u8, + 176u8, + 104u8, + 252u8, + 55u8, + 141u8, + 170u8, + 149u8, + 43u8, + 167u8, + 241u8, + 99u8, + 196u8, + 161u8, + 22u8, + 40u8, + 245u8, + 90u8, + 77u8, + 245u8, + 35u8, + 179u8, + 239u8, + ]; + pub fn match_log(log: &substreams_ethereum::pb::eth::v2::Log) -> bool { + if log.topics.len() != 4usize { + return false; + } + if log.data.len() != 0usize { + return false; + } + return log.topics.get(0).expect("bounds already checked").as_ref() + == Self::TOPIC_ID; + } + pub fn decode( + log: &substreams_ethereum::pb::eth::v2::Log, + ) -> Result { + Ok(Self { + 
from: ethabi::decode( + &[ethabi::ParamType::Address], + log.topics[1usize].as_ref(), + ) + .map_err(|e| format!( + "unable to decode param 'from' from topic of type 'address': {:?}", + e + ))? + .pop() + .expect(INTERNAL_ERR) + .into_address() + .expect(INTERNAL_ERR) + .as_bytes() + .to_vec(), + to: ethabi::decode( + &[ethabi::ParamType::Address], + log.topics[2usize].as_ref(), + ) + .map_err(|e| format!( + "unable to decode param 'to' from topic of type 'address': {:?}", + e + ))? + .pop() + .expect(INTERNAL_ERR) + .into_address() + .expect(INTERNAL_ERR) + .as_bytes() + .to_vec(), + token_id: { + let mut v = [0 as u8; 32]; + ethabi::decode( + &[ethabi::ParamType::Uint(256usize)], + log.topics[3usize].as_ref(), + ) + .map_err(|e| format!( + "unable to decode param 'token_id' from topic of type 'uint256': {:?}", + e + ))? + .pop() + .expect(INTERNAL_ERR) + .into_uint() + .expect(INTERNAL_ERR) + .to_big_endian(v.as_mut_slice()); + substreams::scalar::BigInt::from_unsigned_bytes_be(&v) + }, + }) + } + } + impl substreams_ethereum::Event for Transfer { + const NAME: &'static str = "Transfer"; + fn match_log(log: &substreams_ethereum::pb::eth::v2::Log) -> bool { + Self::match_log(log) + } + fn decode( + log: &substreams_ethereum::pb::eth::v2::Log, + ) -> Result { + Self::decode(log) + } + } + } \ No newline at end of file diff --git a/transforms/uniswap/src/lib.rs b/transforms/uniswap/src/lib.rs new file mode 100644 index 00000000000..634fac19729 --- /dev/null +++ b/transforms/uniswap/src/lib.rs @@ -0,0 +1,157 @@ +pub mod abi; +pub mod proto; +mod types; + +use std::str::FromStr; + +use abi::positionmanager::events::{DecreaseLiquidity, IncreaseLiquidity, Transfer}; +use borsh::{BorshDeserialize, BorshSerialize}; +use ethabi::Address; +use num_bigint::BigInt; +use prost::Message; +use prost_types::Any; +use proto::edgeandnode::uniswap::v1::event::Event2; +use substreams_ethereum::block_view::LogView; +use substreams_ethereum::pb::eth::v2::Block; + +use 
graph::indexer::{BlockTransform, EncodedBlock, EncodedTriggers, State}; + +use crate::abi::factory::events::PoolCreated; +use crate::abi::pool::events::{Burn, Flash, Initialize, Mint, Swap}; +use crate::abi::positionmanager::events::Collect; +use crate::proto::edgeandnode::uniswap::v1 as uniswap; + +const UNISWAP_V3_FACTORY: &str = "0x1F98431c8aD98523631AE4a59f267346ea31F984"; +pub const POOL_TAG: &str = "pool"; + +#[derive(BorshSerialize, BorshDeserialize)] +pub struct Pool { + address: Vec, + token0: Vec, + token1: Vec, + owner: Vec, +} + +#[derive(Clone, Debug)] +pub struct UniswapTransform { + factory_addr: Address, +} + +impl UniswapTransform { + #[allow(unused)] + pub fn new() -> Self { + Self { + factory_addr: Address::from_str(UNISWAP_V3_FACTORY).unwrap(), + } + } +} + +fn parse_log(log_view: &LogView) -> Option<(uniswap::EventType, Event2)> { + let log = &log_view.log; + if Collect::match_log(&log) { + let evt: uniswap::Collect = Collect::decode(log).unwrap().into(); + return Some((uniswap::EventType::Collect, Event2::Collect(evt))); + } + + if IncreaseLiquidity::match_log(&log) { + let evt: uniswap::IncreaseLiquidity = IncreaseLiquidity::decode(log).unwrap().into(); + return Some(( + uniswap::EventType::IncreaseLiquidity, + Event2::Increaseliquidity(evt), + )); + } + if DecreaseLiquidity::match_log(&log) { + let evt: uniswap::DecreaseLiquidity = DecreaseLiquidity::decode(log).unwrap().into(); + return Some(( + uniswap::EventType::DecreaseLiquidity, + Event2::Decreaseliquidity(evt), + )); + } + if Collect::match_log(&log) { + let evt: uniswap::Collect = Collect::decode(log).unwrap().into(); + return Some((uniswap::EventType::Collect, Event2::Collect(evt))); + } + if Transfer::match_log(&log) { + let evt: uniswap::Transfer = Transfer::decode(log).unwrap().into(); + return Some((uniswap::EventType::Transfer, Event2::Transfer(evt))); + } + if Initialize::match_log(&log) { + let evt: uniswap::Initialize = Initialize::decode(log).unwrap().into(); + return 
Some((uniswap::EventType::Initialize, Event2::Initialize(evt))); + } + if Swap::match_log(&log) { + let evt = Swap::decode(log).unwrap(); + let evt: uniswap::Swap = (log_view.receipt.transaction.from.clone(), log.index, evt).into(); + return Some((uniswap::EventType::Swap, Event2::Swap(evt))); + } + if Mint::match_log(&log) { + let evt = Mint::decode(log).unwrap(); + let evt: uniswap::Mint = (log_view.receipt.transaction.from.clone(), log.index, evt).into(); + return Some((uniswap::EventType::Mint, Event2::Mint(evt))); + } + if Burn::match_log(&log) { + let evt = Burn::decode(log).unwrap(); + let evt: uniswap::Burn = (log_view.receipt.transaction.from.clone(), log.index, evt).into(); + return Some((uniswap::EventType::Burn, Event2::Burn(evt))); + } + if Flash::match_log(&log) { + let evt: uniswap::Flash = Flash::decode(log).unwrap().into(); + return Some((uniswap::EventType::Flash, Event2::Flash(evt))); + } + + if PoolCreated::match_log(&log) { + let evt: uniswap::PoolCreated = PoolCreated::decode(log).unwrap().into(); + return Some((uniswap::EventType::PoolCreated, Event2::Poolcreated(evt))); + } + + None +} + +impl BlockTransform for UniswapTransform { + fn transform(&self, block: EncodedBlock, mut state: State) -> (State, EncodedTriggers) { + let mut events = vec![]; + let block = Block::decode(block.0.as_ref()).unwrap(); + + for log in block.logs().into_iter() { + // skip reverted blocks + if log.log.block_index == 0 { + continue; + } + + // // TODO: Improve perf . 
+ let evt = parse_log(&log); + match evt { + Some((et, event)) => events.push(uniswap::Event { + owner: hex::encode(log.address()), + r#type: et.into(), + event: None, + address: hex::encode(log.address()), + block_number: block.header.as_ref().unwrap().number.try_into().unwrap(), + block_timestamp: block.timestamp_seconds().to_string(), + tx_hash: hex::encode(&log.receipt.transaction.hash), + tx_gas_used: log.receipt.transaction.gas_used.to_string(), + tx_gas_price: BigInt::from_bytes_le( + num_bigint::Sign::NoSign, + &log.receipt + .transaction + .gas_price + .clone() + .unwrap_or_default() + .bytes, + ) + .to_string(), + event2: Some(event), + }), + None => continue, + } + } + + // let bs = borsh::to_vec(&Events { events }).unwrap(); + let events = uniswap::Events { events }; + + ( + state, + EncodedTriggers(events.encode_to_vec().into_boxed_slice()), + ) + } +} diff --git a/transforms/uniswap/src/proto/edgeandnode.uniswap.v1.rs b/transforms/uniswap/src/proto/edgeandnode.uniswap.v1.rs new file mode 100644 index 00000000000..72a7bd16502 --- /dev/null +++ b/transforms/uniswap/src/proto/edgeandnode.uniswap.v1.rs @@ -0,0 +1,268 @@ +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Events { + #[prost(message, repeated, tag = "1")] + pub events: ::prost::alloc::vec::Vec, +} +/// Every address is stored as hex string. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Event { + /// Owner points to the address that originated this event + /// The PoolCreated will set this to factory, which is what we can use + /// to track different factories with compatible events. 
    #[prost(string, tag = "1")]
    pub owner: ::prost::alloc::string::String,
    /// Numeric tag of the concrete event type (see `EventType`).
    #[prost(enumeration = "EventType", tag = "2")]
    pub r#type: i32,
    /// NOTE(review): the transform in lib.rs always sets this to `None` and
    /// carries the payload in `event2` instead — confirm before relying on it.
    #[prost(message, optional, tag = "3")]
    pub event: ::core::option::Option<::prost_types::Any>,
    /// Hex-encoded address of the contract that emitted the log.
    #[prost(string, tag = "4")]
    pub address: ::prost::alloc::string::String,
    /// Hex-encoded transaction hash.
    #[prost(string, tag = "5")]
    pub tx_hash: ::prost::alloc::string::String,
    /// Decimal string of the transaction's gas used.
    #[prost(string, tag = "6")]
    pub tx_gas_used: ::prost::alloc::string::String,
    /// Decimal string of the transaction's gas price.
    #[prost(string, tag = "7")]
    pub tx_gas_price: ::prost::alloc::string::String,
    /// This duplicates data (as opposed to adding this data to the head) but AssemblyScript does
    /// not support closures and so using the data is not super easy if it's in the header so I'll
    /// leave it here.
    #[prost(int32, tag = "8")]
    pub block_number: i32,
    #[prost(string, tag = "9")]
    pub block_timestamp: ::prost::alloc::string::String,
    /// The decoded event payload, one variant per supported Uniswap event.
    #[prost(oneof = "event::Event2", tags = "10, 11, 12, 13, 14, 15, 16, 17, 18, 19")]
    pub event2: ::core::option::Option<event::Event2>,
}
/// Nested message and enum types in `Event`.
// NOTE(review): prost-generated protobuf types (presumably produced by
// build.rs from proto/uniswap.proto) — regenerate from the .proto file
// instead of editing by hand.
pub mod event {
    #[allow(clippy::derive_partial_eq_without_eq)]
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Event2 {
        #[prost(message, tag = "10")]
        Poolcreated(super::PoolCreated),
        #[prost(message, tag = "11")]
        Increaseliquidity(super::IncreaseLiquidity),
        #[prost(message, tag = "12")]
        Decreaseliquidity(super::DecreaseLiquidity),
        #[prost(message, tag = "13")]
        Collect(super::Collect),
        #[prost(message, tag = "14")]
        Transfer(super::Transfer),
        #[prost(message, tag = "15")]
        Initialize(super::Initialize),
        #[prost(message, tag = "16")]
        Swap(super::Swap),
        #[prost(message, tag = "17")]
        Mint(super::Mint),
        #[prost(message, tag = "18")]
        Burn(super::Burn),
        #[prost(message, tag = "19")]
        Flash(super::Flash),
    }
}
/// Factory
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct PoolCreated {
    #[prost(string, tag = "1")]
    pub token0: ::prost::alloc::string::String,
    #[prost(string, tag = "2")]
    pub token1: ::prost::alloc::string::String,
    #[prost(string, tag = "3")]
    pub fee: ::prost::alloc::string::String,
    #[prost(string, tag = "4")]
    pub tick_spacing: ::prost::alloc::string::String,
    #[prost(string, tag = "5")]
    pub pool: ::prost::alloc::string::String,
}
/// Position Manager
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct IncreaseLiquidity {
    #[prost(string, tag = "1")]
    pub token_id: ::prost::alloc::string::String,
    #[prost(string, tag = "2")]
    pub liquidity: ::prost::alloc::string::String,
    #[prost(string, tag = "3")]
    pub amount0: ::prost::alloc::string::String,
    #[prost(string, tag = "4")]
    pub amount1: ::prost::alloc::string::String,
}
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DecreaseLiquidity {
    #[prost(string, tag = "1")]
    pub token_id: ::prost::alloc::string::String,
    #[prost(string, tag = "2")]
    pub liquidity: ::prost::alloc::string::String,
    #[prost(string, tag = "3")]
    pub amount0: ::prost::alloc::string::String,
    #[prost(string, tag = "4")]
    pub amount1: ::prost::alloc::string::String,
}
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Collect {
    #[prost(string, tag = "1")]
    pub token_id: ::prost::alloc::string::String,
    #[prost(string, tag = "2")]
    pub recipient: ::prost::alloc::string::String,
    #[prost(string, tag = "3")]
    pub amount0: ::prost::alloc::string::String,
    #[prost(string, tag = "4")]
    pub amount1: ::prost::alloc::string::String,
}
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Transfer {
    #[prost(string, tag = "1")]
    pub from: ::prost::alloc::string::String,
    #[prost(string, tag = "2")]
    pub to: ::prost::alloc::string::String,
    #[prost(string, tag = "3")]
    pub token_id: ::prost::alloc::string::String,
}
/// Pool
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Initialize {
    #[prost(string, tag = "1")]
    pub sqrt_price_x96: ::prost::alloc::string::String,
    #[prost(string, tag = "2")]
    pub tick: ::prost::alloc::string::String,
}
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Swap {
    #[prost(string, tag = "1")]
    pub sender: ::prost::alloc::string::String,
    #[prost(string, tag = "2")]
    pub recipient: ::prost::alloc::string::String,
    #[prost(string, tag = "3")]
    pub amount0: ::prost::alloc::string::String,
    #[prost(string, tag = "4")]
    pub amount1: ::prost::alloc::string::String,
    #[prost(string, tag = "5")]
    pub sqrt_price_x96: ::prost::alloc::string::String,
    #[prost(string, tag = "6")]
    pub liquidity: ::prost::alloc::string::String,
    #[prost(string, tag = "7")]
    pub tick: ::prost::alloc::string::String,
    #[prost(int32, tag = "8")]
    pub log_index: i32,
    #[prost(string, tag = "9")]
    pub transaction_from: ::prost::alloc::string::String,
}
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Mint {
    #[prost(string, tag = "1")]
    pub sender: ::prost::alloc::string::String,
    #[prost(string, tag = "2")]
    pub owner: ::prost::alloc::string::String,
    #[prost(string, tag = "3")]
    pub tick_lower: ::prost::alloc::string::String,
    #[prost(string, tag = "4")]
    pub tick_upper: ::prost::alloc::string::String,
    #[prost(string, tag = "5")]
    pub amount: ::prost::alloc::string::String,
    #[prost(string, tag = "6")]
    pub amount0: ::prost::alloc::string::String,
    #[prost(string, tag = "7")]
    pub amount1: ::prost::alloc::string::String,
    #[prost(int32, tag = "8")]
    pub log_index: i32,
    #[prost(string, tag = "9")]
    pub transaction_from: ::prost::alloc::string::String,
}
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Burn {
    #[prost(string, tag = "1")]
    pub owner: ::prost::alloc::string::String,
    #[prost(string, tag = "2")]
    pub tick_lower: ::prost::alloc::string::String,
    #[prost(string, tag = "3")]
    pub tick_upper: ::prost::alloc::string::String,
    #[prost(string, tag = "4")]
    pub amount: ::prost::alloc::string::String,
    #[prost(string, tag = "5")]
    pub amount0: ::prost::alloc::string::String,
    #[prost(string, tag = "6")]
    pub amount1: ::prost::alloc::string::String,
    #[prost(int32, tag = "7")]
    pub log_index: i32,
    #[prost(string, tag = "8")]
    pub transaction_from: ::prost::alloc::string::String,
}
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Flash {
    #[prost(string, tag = "1")]
    pub sender: ::prost::alloc::string::String,
    #[prost(string, tag = "2")]
    pub recipient: ::prost::alloc::string::String,
    #[prost(string, tag = "3")]
    pub amount0: ::prost::alloc::string::String,
    #[prost(string, tag = "4")]
    pub amount1: ::prost::alloc::string::String,
    #[prost(string, tag = "5")]
    pub paid0: ::prost::alloc::string::String,
    #[prost(string, tag = "6")]
    pub paid1: ::prost::alloc::string::String,
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum EventType {
    /// Factory
    PoolCreated = 0,
    /// Position Manager
    IncreaseLiquidity = 1,
    DecreaseLiquidity = 2,
    Collect = 3,
    Transfer = 4,
    /// Pool
    Initialize = 5,
    Swap = 6,
    Mint = 7,
    Burn = 8,
    Flash = 9,
}
impl EventType {
    /// String value of the enum field names used in the ProtoBuf definition.
    ///
    /// The values are not transformed in any way and thus are considered stable
    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
    pub fn as_str_name(&self) -> &'static str {
        match self {
            EventType::PoolCreated => "POOL_CREATED",
            EventType::IncreaseLiquidity => "INCREASE_LIQUIDITY",
            EventType::DecreaseLiquidity => "DECREASE_LIQUIDITY",
            EventType::Collect => "COLLECT",
            EventType::Transfer => "TRANSFER",
            EventType::Initialize => "INITIALIZE",
            EventType::Swap => "SWAP",
            EventType::Mint => "MINT",
            EventType::Burn => "BURN",
            EventType::Flash => "FLASH",
        }
    }
    /// Creates an enum from field names used in the ProtoBuf definition.
+ pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "POOL_CREATED" => Some(Self::PoolCreated), + "INCREASE_LIQUIDITY" => Some(Self::IncreaseLiquidity), + "DECREASE_LIQUIDITY" => Some(Self::DecreaseLiquidity), + "COLLECT" => Some(Self::Collect), + "TRANSFER" => Some(Self::Transfer), + "INITIALIZE" => Some(Self::Initialize), + "SWAP" => Some(Self::Swap), + "MINT" => Some(Self::Mint), + "BURN" => Some(Self::Burn), + "FLASH" => Some(Self::Flash), + _ => None, + } + } +} diff --git a/transforms/uniswap/src/proto/mod.rs b/transforms/uniswap/src/proto/mod.rs new file mode 100644 index 00000000000..df7c5ced8d0 --- /dev/null +++ b/transforms/uniswap/src/proto/mod.rs @@ -0,0 +1,7 @@ +pub mod edgeandnode { + pub mod uniswap { + pub mod v1 { + include!("edgeandnode.uniswap.v1.rs"); + } + } +} diff --git a/transforms/uniswap/src/types.rs b/transforms/uniswap/src/types.rs new file mode 100644 index 00000000000..fced6673354 --- /dev/null +++ b/transforms/uniswap/src/types.rs @@ -0,0 +1,201 @@ +use crate::{ + abi::{ + factory::events::PoolCreated, + pool::events::{Burn, Flash, Initialize, Mint, Swap}, + positionmanager::events::{Collect, DecreaseLiquidity, IncreaseLiquidity, Transfer}, + }, + proto::edgeandnode::uniswap::v1 as uniswap, +}; + +impl Into for PoolCreated { + fn into(self) -> uniswap::PoolCreated { + let PoolCreated { + token0, + token1, + fee, + tick_spacing, + pool, + } = self; + + uniswap::PoolCreated { + token0: hex::encode(token0), + token1: hex::encode(token1), + fee: fee.to_string(), + tick_spacing: tick_spacing.to_string(), + pool: hex::encode(pool), + } + } +} + +// Position manager events +impl Into for IncreaseLiquidity { + fn into(self) -> uniswap::IncreaseLiquidity { + let IncreaseLiquidity { + token_id, + liquidity, + amount0, + amount1, + } = self; + + uniswap::IncreaseLiquidity { + token_id: token_id.to_string(), + liquidity: liquidity.to_string(), + amount0: amount0.to_string(), + amount1: amount1.to_string(), + } + } +} 
+impl Into for DecreaseLiquidity { + fn into(self) -> uniswap::DecreaseLiquidity { + let DecreaseLiquidity { + token_id, + liquidity, + amount0, + amount1, + } = self; + + uniswap::DecreaseLiquidity { + token_id: token_id.to_string(), + liquidity: liquidity.to_string(), + amount0: amount0.to_string(), + amount1: amount1.to_string(), + } + } +} +impl Into for Collect { + fn into(self) -> uniswap::Collect { + let Collect { + token_id, + amount0, + amount1, + recipient, + } = self; + + uniswap::Collect { + token_id: token_id.to_string(), + amount0: amount0.to_string(), + amount1: amount1.to_string(), + recipient: hex::encode(recipient), + } + } +} +impl Into for Transfer { + fn into(self) -> uniswap::Transfer { + let Transfer { from, to, token_id } = self; + + uniswap::Transfer { + from: hex::encode(from), + to: hex::encode(to), + token_id: token_id.to_string(), + } + } +} + +// Pool events +impl Into for Initialize { + fn into(self) -> uniswap::Initialize { + let Initialize { + sqrt_price_x96, + tick, + } = self; + + uniswap::Initialize { + sqrt_price_x96: sqrt_price_x96.to_string(), + tick: tick.to_string(), + } + } +} +impl From<(Vec, u32, Swap)> for uniswap::Swap { + fn from(val: (Vec, u32, Swap)) -> Self { + let Swap { + sender, + recipient, + amount0, + amount1, + sqrt_price_x96, + liquidity, + tick, + } = val.2; + + uniswap::Swap { + sender: hex::encode(sender), + recipient: hex::encode(recipient), + amount0: amount0.to_string(), + amount1: amount1.to_string(), + sqrt_price_x96: sqrt_price_x96.to_string(), + liquidity: liquidity.to_string(), + tick: tick.to_string(), + log_index: val.1.try_into().unwrap(), + transaction_from: hex::encode(val.0), + } + } +} +impl From<(Vec, u32, Mint)> for uniswap::Mint { + fn from(val: (Vec, u32, Mint)) -> Self { + let Mint { + sender, + owner, + amount0, + amount1, + tick_lower, + tick_upper, + amount, + } = val.2; + + uniswap::Mint { + sender: hex::encode(sender), + amount0: amount0.to_string(), + amount1: 
amount1.to_string(), + owner: hex::encode(owner), + tick_lower: tick_lower.to_string(), + tick_upper: tick_upper.to_string(), + amount: amount.to_string(), + log_index: val.1.try_into().unwrap(), + transaction_from: hex::encode(val.0), + } + } +} +impl From<(Vec, u32, Burn)> for uniswap::Burn { + fn from(val: (Vec, u32, Burn)) -> Self { + let Burn { + owner, + amount0, + amount1, + tick_lower, + tick_upper, + amount, + } = val.2; + + uniswap::Burn { + amount0: amount0.to_string(), + amount1: amount1.to_string(), + owner: hex::encode(owner), + tick_lower: tick_lower.to_string(), + tick_upper: tick_upper.to_string(), + amount: amount.to_string(), + log_index: val.1.try_into().unwrap(), + transaction_from: hex::encode(val.0), + } + } +} +impl Into for Flash { + fn into(self) -> uniswap::Flash { + let Flash { + sender, + recipient, + amount0, + amount1, + paid0, + paid1, + } = self; + + uniswap::Flash { + sender: hex::encode(sender), + recipient: hex::encode(recipient), + amount0: amount0.to_string(), + amount1: amount1.to_string(), + paid0: paid0.to_string(), + paid1: paid1.to_string(), + } + } +}