diff --git a/Cargo.lock b/Cargo.lock index 71299dffa21..cae8894e939 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1368,7 +1368,7 @@ dependencies = [ "encode_unicode", "libc", "once_cell", - "unicode-width 0.2.0", + "unicode-width", "windows-sys 0.59.0", ] @@ -2846,11 +2846,10 @@ dependencies = [ "graphql-parser", "half", "hex", - "hex-literal 1.1.0", + "hex-literal", "http 0.2.12", "http 1.4.0", "http-body-util", - "humantime", "hyper 1.8.1", "hyper-util", "indoc", @@ -2955,26 +2954,6 @@ dependencies = [ "serde", "tokio", "tonic-build", - "trigger-filters", -] - -[[package]] -name = "graph-chain-substreams" -version = "0.36.0" -dependencies = [ - "anyhow", - "async-trait", - "base64 0.22.1", - "graph", - "graph-runtime-wasm", - "hex", - "prost", - "prost-types", - "semver", - "serde", - "tokio", - "tokio-stream", - "tonic-build", ] [[package]] @@ -2991,7 +2970,6 @@ dependencies = [ "graph", "graph-chain-ethereum", "graph-chain-near", - "graph-chain-substreams", "graph-runtime-wasm", "indoc", "itertools", @@ -3038,7 +3016,6 @@ dependencies = [ "graph", "graph-chain-ethereum", "graph-chain-near", - "graph-chain-substreams", "graph-core", "graph-graphql", "graph-server-http", @@ -3122,7 +3099,6 @@ dependencies = [ "graph", "graph-chain-ethereum", "graph-chain-near", - "graph-chain-substreams", "graph-graphql", ] @@ -3194,7 +3170,6 @@ dependencies = [ "async-trait", "graph", "graph-chain-ethereum", - "graph-chain-substreams", "graph-core", "graph-graphql", "graph-node", @@ -3475,12 +3450,6 @@ dependencies = [ "arrayvec 0.7.4", ] -[[package]] -name = "hex-literal" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ebdb29d2ea9ed0083cd8cece49bbd968021bd99b0849edb4a9a7ee0fdf6a4e0" - [[package]] name = "hex-literal" version = "1.1.0" @@ -4874,15 +4843,6 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" -[[package]] -name = "pad" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2ad9b889f1b12e0b9ee24db044b5129150d5eada288edc800f789928dc8c0e3" -dependencies = [ - "unicode-width 0.1.13", -] - [[package]] name = "parity-scale-codec" version = "3.6.12" @@ -6689,84 +6649,6 @@ dependencies = [ "syn 2.0.106", ] -[[package]] -name = "substreams" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5bb63116b90d4c174114fb237a8916dd995c939874f7576333990a44d78b642a" -dependencies = [ - "anyhow", - "bigdecimal 0.3.1", - "hex", - "hex-literal 0.3.4", - "num-bigint 0.4.6", - "num-integer", - "num-traits", - "pad", - "pest", - "pest_derive", - "prost", - "prost-build", - "prost-types", - "substreams-macro", - "thiserror 1.0.61", -] - -[[package]] -name = "substreams-entity-change" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0587b8d5dd7bffb0415d544c31e742c4cabdb81bbe9a3abfffff125185e4e9e8" -dependencies = [ - "base64 0.13.1", - "prost", - "prost-types", - "substreams", -] - -[[package]] -name = "substreams-head-tracker" -version = "0.36.0" - -[[package]] -name = "substreams-macro" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f36f36e9da94db29f49daf3ab6b47b529b57c43fc5d58bc35b160aaad1a7233f" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", - "thiserror 1.0.61", -] - -[[package]] -name = "substreams-near-core" -version = "0.10.2" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "01ef8a763c5a5604b16f4898ab75d39494ef785c457aaca1fd7761b299f40fbf" -dependencies = [ - "bs58 0.4.0", - "getrandom 0.2.15", - "hex", - "prost", - "prost-build", - "prost-types", -] - -[[package]] -name = "substreams-trigger-filter" -version = "0.36.0" -dependencies = [ - "hex", - "prost", - "substreams", - "substreams-entity-change", - "substreams-near-core", - "tonic-build", - "trigger-filters", -] - [[package]] name = "subtle" version = "2.6.1" @@ -6927,7 +6809,7 @@ dependencies = [ "graph-node", "graph-store-postgres", "hex", - "hex-literal 1.1.0", + "hex-literal", "lazy_static", "pretty_assertions", "prost-types", @@ -7526,13 +7408,6 @@ dependencies = [ "syn 2.0.106", ] -[[package]] -name = "trigger-filters" -version = "0.36.0" -dependencies = [ - "anyhow", -] - [[package]] name = "try-lock" version = "0.2.5" @@ -7640,12 +7515,6 @@ version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f6ccf251212114b54433ec949fd6a7841275f9ada20dddd2f29e9ceea4501493" -[[package]] -name = "unicode-width" -version = "0.1.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0336d538f7abc86d282a4189614dfaa90810dfc2c6f6427eaf88e16311dd225d" - [[package]] name = "unicode-width" version = "0.2.0" @@ -8216,7 +8085,7 @@ dependencies = [ "bumpalo", "leb128fmt", "memchr", - "unicode-width 0.2.0", + "unicode-width", "wasm-encoder 0.244.0", ] diff --git a/Cargo.toml b/Cargo.toml index 861b70ea958..9097b9b399d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -7,7 +7,6 @@ members = [ "chain/common", "chain/ethereum", "chain/near", - "chain/substreams", "gnd", "graphql", "node", @@ -21,9 +20,6 @@ members = [ "server/metrics", "store/postgres", "store/test-store", - "substreams/substreams-head-tracker", - "substreams/substreams-trigger-filter", - "substreams/trigger-filters", "graph", "tests", "graph/derive", diff --git a/chain/ethereum/examples/firehose.rs b/chain/ethereum/examples/firehose.rs index 5a70794dfe2..e5f85964fe1 100644 --- a/chain/ethereum/examples/firehose.rs +++ b/chain/ethereum/examples/firehose.rs @@ -38,7 +38,6 @@ async fn main() -> Result<(), Error> { false, SubgraphLimit::Unlimited, metrics, - false, )); loop { diff --git a/chain/ethereum/src/chain.rs b/chain/ethereum/src/chain.rs index 11ca025e0e2..4acfc4aab8b 100644 --- a/chain/ethereum/src/chain.rs +++ b/chain/ethereum/src/chain.rs @@ -16,9 +16,7 @@ use graph::prelude::{ retry, BlockHash, ComponentLoggerConfig, ElasticComponentLoggerConfig, EthereumBlock, EthereumCallCache, LightEthereumBlock, LightEthereumBlockExt, MetricsRegistry, StoreError, }; -use graph::schema::InputSchema; use graph::slog::{debug, error, trace, warn}; -use graph::substreams::Clock; use graph::{ blockchain::{ block_stream::{ @@ -114,18 +112,6 @@ impl BlockStreamBuilder for EthereumStreamBuilder { ))) } - async fn build_substreams( - &self, - _chain: &Chain, - _schema: InputSchema, - _deployment: DeploymentLocator, - _block_cursor: FirehoseCursor, - _subgraph_current_block: Option, - _filter: Arc<::TriggerFilter>, - ) -> Result>> { - unimplemented!() - } - async fn build_subgraph_block_stream( &self, chain: &Chain, @@ -1159,16 +1145,6 @@ impl BlockStreamMapper for FirehoseMapper { .await .map_err(BlockStreamError::from) } - - async fn handle_substreams_block( - &self, - _logger: &Logger, - _clock: Clock, - _cursor: FirehoseCursor, - _block: Vec, - ) -> Result, BlockStreamError> { - unimplemented!() - } } #[async_trait] diff --git 
a/chain/near/Cargo.toml b/chain/near/Cargo.toml index 6984c831cd8..7df0cc53966 100644 --- a/chain/near/Cargo.toml +++ b/chain/near/Cargo.toml @@ -19,5 +19,4 @@ graph-runtime-derive = { path = "../../runtime/derive" } [dev-dependencies] diesel = { workspace = true } -trigger-filters.path = "../../substreams/trigger-filters" -tokio = { workspace = true } \ No newline at end of file +tokio = { workspace = true } diff --git a/chain/near/build.rs b/chain/near/build.rs index 0bb50d10b27..a95e37d57ac 100644 --- a/chain/near/build.rs +++ b/chain/near/build.rs @@ -3,9 +3,6 @@ fn main() { tonic_build::configure() .out_dir("src/protobuf") .extern_path(".sf.near.codec.v1", "crate::codec::pbcodec") - .compile_protos( - &["proto/near.proto", "proto/substreams-triggers.proto"], - &["proto"], - ) + .compile_protos(&["proto/near.proto"], &["proto"]) .expect("Failed to compile Firehose NEAR proto(s)"); } diff --git a/chain/near/proto/substreams-triggers.proto b/chain/near/proto/substreams-triggers.proto deleted file mode 100644 index 947052a2566..00000000000 --- a/chain/near/proto/substreams-triggers.proto +++ /dev/null @@ -1,12 +0,0 @@ -syntax = "proto3"; - -import "near.proto"; - -package receipts.v1; - -message BlockAndReceipts { - sf.near.codec.v1.Block block = 1; - repeated sf.near.codec.v1.ExecutionOutcomeWithId outcome = 2; - repeated sf.near.codec.v1.Receipt receipt = 3; -} - diff --git a/chain/near/src/adapter.rs b/chain/near/src/adapter.rs index 4d6151aa5ca..b6e450f5741 100644 --- a/chain/near/src/adapter.rs +++ b/chain/near/src/adapter.rs @@ -251,14 +251,13 @@ mod test { use std::collections::HashSet; use super::NearBlockFilter; - use crate::adapter::{NearReceiptFilter, TriggerFilter, BASIC_RECEIPT_FILTER_TYPE_URL}; + use crate::adapter::{TriggerFilter, BASIC_RECEIPT_FILTER_TYPE_URL}; use graph::{ blockchain::TriggerFilter as _, firehose::{BasicReceiptFilter, PrefixSuffixPair}, }; use prost::Message; use prost_types::Any; - use trigger_filters::NearFilter; #[test] fn near_trigger_empty_filter() { @@ -365,124 +364,6 @@ mod test { ); } - #[test] - fn test_near_filter_params_serialization() -> anyhow::Result<()> { - struct Case<'a> { - name: &'a str, - input: NearReceiptFilter, - expected: NearFilter<'a>, - } - - let cases = vec![ - Case { - name: "empty", - input: NearReceiptFilter::default(), - expected: NearFilter::default(), - }, - Case { - name: "only full matches", - input: super::NearReceiptFilter { - accounts: HashSet::from_iter(vec!["acc1".into()]), - partial_accounts: HashSet::new(), - }, - expected: NearFilter { - accounts: HashSet::from_iter(vec!["acc1"]), - partial_accounts: HashSet::default(), - }, - }, - Case { - name: "only partial matches", - input: super::NearReceiptFilter { - accounts: HashSet::new(), - partial_accounts: HashSet::from_iter(vec![(Some("acc1".into()), None)]), - }, - expected: NearFilter { - accounts: HashSet::default(), - partial_accounts: HashSet::from_iter(vec![(Some("acc1"), None)]), - }, - }, - Case { - name: "both 1len matches", - input: super::NearReceiptFilter { - accounts: HashSet::from_iter(vec!["acc1".into()]), - partial_accounts: HashSet::from_iter(vec![(Some("s1".into()), None)]), - }, - expected: NearFilter { - accounts: HashSet::from_iter(vec!["acc1"]), - partial_accounts: HashSet::from_iter(vec![(Some("s1"), None)]), - }, - }, - Case { - name: "more partials matches", - input: super::NearReceiptFilter { - accounts: HashSet::from_iter(vec!["acc1".into()]), - partial_accounts: HashSet::from_iter(vec![ - (Some("s1".into()), None), - (None, 
Some("s3".into())), - (Some("s2".into()), Some("s2".into())), - ]), - }, - expected: NearFilter { - accounts: HashSet::from_iter(vec!["acc1"]), - partial_accounts: HashSet::from_iter(vec![ - (Some("s1"), None), - (None, Some("s3")), - (Some("s2"), Some("s2")), - ]), - }, - }, - Case { - name: "both matches", - input: NearReceiptFilter { - accounts: HashSet::from_iter(vec![ - "acc1".into(), - "=12-30786jhasdgmasd".into(), - "^&%^&^$".into(), - "acc3".into(), - ]), - partial_accounts: HashSet::from_iter(vec![ - (Some("1.2.2.3.45.5".into()), None), - (None, Some("kjysdfoiua6sd".into())), - (Some("120938pokasd".into()), Some("102938poai[sd]".into())), - ]), - }, - expected: NearFilter { - accounts: HashSet::from_iter(vec![ - "acc1", - "=12-30786jhasdgmasd", - "^&%^&^$", - "acc3", - ]), - partial_accounts: HashSet::from_iter(vec![ - (Some("1.2.2.3.45.5"), None), - (None, Some("kjysdfoiua6sd")), - (Some("120938pokasd"), Some("102938poai[sd]")), - ]), - }, - }, - ]; - - for case in cases.into_iter() { - let tf = TriggerFilter { - block_filter: NearBlockFilter::default(), - receipt_filter: case.input, - }; - let param = tf.to_module_params(); - let filter = NearFilter::try_from(param.as_str()).expect(&format!( - "case: {}, the filter to parse params correctly", - case.name - )); - - assert_eq!( - filter, case.expected, - "case {},param:\n{}", - case.name, param - ); - } - - Ok(()) - } - fn decode_filter(firehose_filter: Vec) -> BasicReceiptFilter { let firehose_filter = firehose_filter[0].clone(); assert_eq!( diff --git a/chain/near/src/chain.rs b/chain/near/src/chain.rs index 7bf2b50a6a8..6f25c64589a 100644 --- a/chain/near/src/chain.rs +++ b/chain/near/src/chain.rs @@ -1,22 +1,16 @@ -use anyhow::anyhow; use async_trait::async_trait; use graph::blockchain::client::ChainClient; use graph::blockchain::firehose_block_ingestor::FirehoseBlockIngestor; -use graph::blockchain::substreams_block_stream::SubstreamsBlockStream; use graph::blockchain::{ - BasicBlockchainBuilder, BlockIngestor, BlockchainBuilder, BlockchainKind, NoopDecoderHook, - NoopRuntimeAdapter, Trigger, TriggerFilterWrapper, + BlockIngestor, BlockchainKind, NoopDecoderHook, NoopRuntimeAdapter, TriggerFilterWrapper, }; use graph::cheap_clone::CheapClone; use graph::components::network_provider::ChainName; use graph::components::store::{ChainHeadStore, DeploymentCursorTracker, SourceableStore}; use graph::data::subgraph::UnifiedMappingApiVersion; -use graph::env::EnvVars; -use graph::firehose::FirehoseEndpoint; +use graph::firehose::{FirehoseEndpoint, FirehoseEndpoints}; use graph::futures03::TryFutureExt; use graph::prelude::MetricsRegistry; -use graph::schema::InputSchema; -use graph::substreams::{Clock, Package}; use graph::{ anyhow::Result, blockchain::{ @@ -37,7 +31,6 @@ use std::collections::BTreeSet; use std::sync::Arc; use crate::adapter::TriggerFilter; -use crate::codec::substreams_triggers::BlockAndReceipts; use crate::codec::Block; use crate::data_source::{DataSourceTemplate, UnresolvedDataSourceTemplate}; use crate::trigger::{self, NearTrigger}; @@ -49,68 +42,10 @@ use graph::blockchain::block_stream::{ BlockStream, BlockStreamBuilder, BlockStreamError, BlockStreamMapper, FirehoseCursor, }; -const NEAR_FILTER_MODULE_NAME: &str = "near_filter"; -const SUBSTREAMS_TRIGGER_FILTER_BYTES: &[u8; 510162] = include_bytes!( - "../../../substreams/substreams-trigger-filter/substreams-trigger-filter-v0.1.0.spkg" -); - pub struct NearStreamBuilder {} #[async_trait] impl BlockStreamBuilder for NearStreamBuilder { - async fn 
build_substreams( - &self, - chain: &Chain, - _schema: InputSchema, - deployment: DeploymentLocator, - block_cursor: FirehoseCursor, - subgraph_current_block: Option, - filter: Arc<::TriggerFilter>, - ) -> Result>> { - let mapper = Arc::new(FirehoseMapper { - adapter: Arc::new(TriggersAdapter {}), - filter, - }); - let mut package = - Package::decode(SUBSTREAMS_TRIGGER_FILTER_BYTES.to_vec().as_ref()).unwrap(); - match package.modules.as_mut() { - Some(modules) => modules - .modules - .iter_mut() - .find(|module| module.name == NEAR_FILTER_MODULE_NAME) - .map(|module| { - graph::substreams::patch_module_params( - mapper.filter.to_module_params(), - module, - ); - module - }), - None => None, - }; - - let logger = chain - .logger_factory - .subgraph_logger(&deployment) - .new(o!("component" => "SubstreamsBlockStream")); - let start_block = subgraph_current_block - .as_ref() - .map(|b| b.number) - .unwrap_or_default(); - - Ok(Box::new(SubstreamsBlockStream::new( - deployment.hash, - chain.chain_client(), - subgraph_current_block, - block_cursor.clone(), - mapper, - package.modules.unwrap_or_default(), - NEAR_FILTER_MODULE_NAME.to_string(), - vec![start_block], - vec![], - logger, - chain.metrics_registry.clone(), - ))) - } async fn build_firehose( &self, chain: &Chain, @@ -169,7 +104,6 @@ pub struct Chain { chain_head_store: Arc, metrics_registry: Arc, block_stream_builder: Arc>, - prefer_substreams: bool, } impl std::fmt::Debug for Chain { @@ -178,17 +112,21 @@ impl std::fmt::Debug for Chain { } } -#[async_trait] -impl BlockchainBuilder for BasicBlockchainBuilder { - async fn build(self, config: &Arc) -> Chain { +impl Chain { + pub fn new( + logger_factory: LoggerFactory, + name: ChainName, + chain_head_store: Arc, + firehose_endpoints: FirehoseEndpoints, + metrics_registry: Arc, + ) -> Self { Chain { - logger_factory: self.logger_factory, - name: self.name, - chain_head_store: self.chain_head_store, - client: Arc::new(ChainClient::new_firehose(self.firehose_endpoints)), - metrics_registry: self.metrics_registry, + logger_factory, + name, + chain_head_store, + client: Arc::new(ChainClient::new_firehose(firehose_endpoints)), + metrics_registry, block_stream_builder: Arc::new(NearStreamBuilder {}), - prefer_substreams: config.prefer_substreams_block_streams, } } } @@ -237,20 +175,6 @@ impl Blockchain for Chain { filter: Arc>, unified_api_version: UnifiedMappingApiVersion, ) -> Result>, Error> { - if self.prefer_substreams { - return self - .block_stream_builder - .build_substreams( - self, - store.input_schema(), - deployment, - store.firehose_cursor(), - store.block_ptr(), - filter.chain_filter.clone(), - ) - .await; - } - self.block_stream_builder .build_firehose( self, @@ -460,44 +384,6 @@ impl BlockStreamMapper for FirehoseMapper { .await .map_err(BlockStreamError::from) } - - async fn handle_substreams_block( - &self, - _logger: &Logger, - _clock: Clock, - cursor: FirehoseCursor, - message: Vec, - ) -> Result, BlockStreamError> { - let BlockAndReceipts { - block, - outcome, - receipt, - } = BlockAndReceipts::decode(message.as_ref())?; - let block = block.ok_or_else(|| anyhow!("near block is mandatory on substreams"))?; - let arc_block = Arc::new(block.clone()); - - let trigger_data = outcome - .into_iter() - .zip(receipt.into_iter()) - .map(|(outcome, receipt)| { - Trigger::Chain(NearTrigger::Receipt(Arc::new( - trigger::ReceiptWithOutcome { - outcome, - receipt, - block: arc_block.clone(), - }, - ))) - }) - .collect(); - - Ok(BlockStreamEvent::ProcessBlock( - BlockWithTriggers { - block, 
- trigger_data, - }, - cursor, - )) - } } #[async_trait] diff --git a/chain/near/src/codec.rs b/chain/near/src/codec.rs index 6f0f2f7af4d..bbcfd6646a4 100644 --- a/chain/near/src/codec.rs +++ b/chain/near/src/codec.rs @@ -2,10 +2,6 @@ #[path = "protobuf/sf.near.codec.v1.rs"] pub mod pbcodec; -#[rustfmt::skip] -#[path = "protobuf/receipts.v1.rs"] -pub mod substreams_triggers; - use graph::{ blockchain::Block as BlockchainBlock, blockchain::{BlockPtr, BlockTime}, diff --git a/chain/near/src/protobuf/receipts.v1.rs b/chain/near/src/protobuf/receipts.v1.rs deleted file mode 100644 index 2b844103e9a..00000000000 --- a/chain/near/src/protobuf/receipts.v1.rs +++ /dev/null @@ -1,10 +0,0 @@ -// This file is @generated by prost-build. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct BlockAndReceipts { - #[prost(message, optional, tag = "1")] - pub block: ::core::option::Option, - #[prost(message, repeated, tag = "2")] - pub outcome: ::prost::alloc::vec::Vec, - #[prost(message, repeated, tag = "3")] - pub receipt: ::prost::alloc::vec::Vec, -} diff --git a/chain/substreams/Cargo.toml b/chain/substreams/Cargo.toml deleted file mode 100644 index 57a5706aca1..00000000000 --- a/chain/substreams/Cargo.toml +++ /dev/null @@ -1,23 +0,0 @@ -[package] -name = "graph-chain-substreams" -version.workspace = true -edition.workspace = true - -[build-dependencies] -tonic-build = { workspace = true } - -[dependencies] -async-trait = { workspace = true } -graph = { path = "../../graph" } -graph-runtime-wasm = { path = "../../runtime/wasm" } -serde = { workspace = true } -prost = { workspace = true } -prost-types = { workspace = true } -anyhow = "1.0" -hex = "0.4.3" -semver = "1.0.27" -base64 = "0.22.1" -tokio-stream = { workspace = true } - -[dev-dependencies] -tokio = { version = "1", features = ["full"] } diff --git a/chain/substreams/build.rs b/chain/substreams/build.rs deleted file mode 100644 index 330a01a8c68..00000000000 --- a/chain/substreams/build.rs +++ /dev/null @@ -1,8 +0,0 @@ -fn main() { - println!("cargo:rerun-if-changed=proto"); - tonic_build::configure() - .protoc_arg("--experimental_allow_proto3_optional") - .out_dir("src/protobuf") - .compile_protos(&["proto/codec.proto"], &["proto"]) - .expect("Failed to compile Substreams entity proto(s)"); -} diff --git a/chain/substreams/examples/README.md b/chain/substreams/examples/README.md deleted file mode 100644 index afd1882b337..00000000000 --- a/chain/substreams/examples/README.md +++ /dev/null @@ -1,13 +0,0 @@ -## Substreams example - -1. Set environment variables -```bash -$> export SUBSTREAMS_API_TOKEN=your_sf_token -$> export SUBSTREAMS_ENDPOINT=your_sf_endpoint # optional; omit it to fall back to the default endpoint -$> export SUBSTREAMS_PACKAGE=path_to_your_spkg -``` - -2. 
Run `substreams` example -```bash -cargo run -p graph-chain-substreams --example substreams [module_name] # for graph entities run `graph_out` -``` diff --git a/chain/substreams/examples/substreams.rs b/chain/substreams/examples/substreams.rs deleted file mode 100644 index d2277580c37..00000000000 --- a/chain/substreams/examples/substreams.rs +++ /dev/null @@ -1,115 +0,0 @@ -use anyhow::{format_err, Context, Error}; -use graph::blockchain::block_stream::{BlockStreamEvent, FirehoseCursor}; -use graph::blockchain::client::ChainClient; -use graph::blockchain::substreams_block_stream::SubstreamsBlockStream; -use graph::endpoint::EndpointMetrics; -use graph::firehose::{FirehoseEndpoints, SubgraphLimit}; -use graph::prelude::{info, tokio, DeploymentHash, MetricsRegistry, Registry}; -use graph::{env::env_var, firehose::FirehoseEndpoint, log::logger, substreams}; -use graph_chain_substreams::mapper::Mapper; -use prost::Message; -use std::env; -use std::sync::Arc; -use tokio_stream::StreamExt; - -#[tokio::main] -async fn main() -> Result<(), Error> { - let module_name = env::args().nth(1).unwrap(); - - let token_env = env_var("SUBSTREAMS_API_TOKEN", "".to_string()); - let mut token: Option = None; - if !token_env.is_empty() { - token = Some(token_env); - } - - let endpoint = env_var( - "SUBSTREAMS_ENDPOINT", - "https://api.streamingfast.io".to_string(), - ); - - let package_file = env_var("SUBSTREAMS_PACKAGE", "".to_string()); - if package_file.is_empty() { - panic!("Environment variable SUBSTREAMS_PACKAGE must be set"); - } - - let package = read_package(&package_file)?; - - let logger = logger(true); - // Set up Prometheus registry - let prometheus_registry = Arc::new(Registry::new()); - let metrics_registry = Arc::new(MetricsRegistry::new( - logger.clone(), - prometheus_registry.clone(), - )); - - let endpoint_metrics = EndpointMetrics::new( - logger.clone(), - &[endpoint.clone()], - Arc::new(MetricsRegistry::mock()), - ); - - let firehose = Arc::new(FirehoseEndpoint::new( - "substreams", - &endpoint, - token, - None, - false, - false, - SubgraphLimit::Unlimited, - Arc::new(endpoint_metrics), - true, - )); - - let client = Arc::new(ChainClient::new_firehose(FirehoseEndpoints::for_testing( - vec![firehose], - ))); - - let mut stream: SubstreamsBlockStream = - SubstreamsBlockStream::new( - DeploymentHash::new("substreams".to_string()).unwrap(), - client, - None, - FirehoseCursor::None, - Arc::new(Mapper { - schema: None, - skip_empty_blocks: false, - }), - package.modules.clone().unwrap_or_default(), - module_name.to_string(), - vec![12369621], - vec![], - logger.clone(), - metrics_registry, - ); - - loop { - match stream.next().await { - None => { - break; - } - Some(event) => match event { - Err(_) => {} - Ok(block_stream_event) => match block_stream_event { - BlockStreamEvent::ProcessWasmBlock(_, _, _, _, _) => { - unreachable!("Cannot happen with this mapper") - } - BlockStreamEvent::Revert(_, _) => {} - BlockStreamEvent::ProcessBlock(block_with_trigger, _) => { - for change in block_with_trigger.block.changes.entity_changes { - for field in change.fields { - info!(&logger, "field: {:?}", field); - } - } - } - }, - }, - } - } - - Ok(()) -} - -fn read_package(file: &str) -> Result { - let content = std::fs::read(file).context(format_err!("read package {}", file))?; - substreams::Package::decode(content.as_ref()).context("decode command") -} diff --git a/chain/substreams/proto/codec.proto b/chain/substreams/proto/codec.proto deleted file mode 100644 index bd75e7f95c8..00000000000 --- 
a/chain/substreams/proto/codec.proto +++ /dev/null @@ -1,47 +0,0 @@ -syntax = "proto3"; - -package substreams.entity.v1; - -message EntityChanges { - repeated EntityChange entity_changes = 5; -} - -message EntityChange { - string entity = 1; - string id = 2; - uint64 ordinal = 3; - enum Operation { - UNSET = 0; // Protobuf default should not be used, this is used so that the consume can ensure that the value was actually specified - CREATE = 1; - UPDATE = 2; - DELETE = 3; - } - Operation operation = 4; - repeated Field fields = 5; -} - -message Value { - oneof typed { - int32 int32 = 1; - string bigdecimal = 2; - string bigint = 3; - string string = 4; - bytes bytes = 5; - bool bool = 6; - int64 timestamp = 7; - //reserved 8 to 9; // For future types - - Array array = 10; - } -} - -message Array { - repeated Value value = 1; -} - -message Field { - string name = 1; - optional Value new_value = 3; - optional Value old_value = 5; -} - diff --git a/chain/substreams/src/block_ingestor.rs b/chain/substreams/src/block_ingestor.rs deleted file mode 100644 index 46966e9e4eb..00000000000 --- a/chain/substreams/src/block_ingestor.rs +++ /dev/null @@ -1,204 +0,0 @@ -use std::{sync::Arc, time::Duration}; - -use crate::mapper::Mapper; -use anyhow::{Context, Error}; -use async_trait::async_trait; -use graph::blockchain::block_stream::{BlockStreamError, FirehoseCursor}; -use graph::blockchain::BlockchainKind; -use graph::blockchain::{ - client::ChainClient, substreams_block_stream::SubstreamsBlockStream, BlockIngestor, -}; -use graph::components::network_provider::ChainName; -use graph::components::store::ChainHeadStore; -use graph::prelude::MetricsRegistry; -use graph::slog::trace; -use graph::substreams::Package; -use graph::{ - blockchain::block_stream::BlockStreamEvent, - cheap_clone::CheapClone, - prelude::{error, info, DeploymentHash, Logger}, - util::backoff::ExponentialBackoff, -}; -use prost::Message; -use tokio_stream::StreamExt; - -const SUBSTREAMS_HEAD_TRACKER_BYTES: &[u8; 89935] = include_bytes!( - "../../../substreams/substreams-head-tracker/substreams-head-tracker-v1.0.0.spkg" -); - -pub struct SubstreamsBlockIngestor { - chain_store: Arc, - client: Arc>, - logger: Logger, - chain_name: ChainName, - metrics: Arc, -} - -impl SubstreamsBlockIngestor { - pub fn new( - chain_store: Arc, - client: Arc>, - logger: Logger, - chain_name: ChainName, - metrics: Arc, - ) -> SubstreamsBlockIngestor { - SubstreamsBlockIngestor { - chain_store, - client, - logger, - chain_name, - metrics, - } - } - - async fn fetch_head_cursor(&self) -> String { - let mut backoff = - ExponentialBackoff::new(Duration::from_millis(250), Duration::from_secs(30)); - loop { - match self.chain_store.clone().chain_head_cursor().await { - Ok(cursor) => return cursor.unwrap_or_default(), - Err(e) => { - error!(self.logger, "Fetching chain head cursor failed: {:#}", e); - - backoff.sleep_async().await; - } - } - } - } - - /// Consumes the incoming stream of blocks infinitely until it hits an error. In which case - /// the error is logged right away and the latest available cursor is returned - /// upstream for future consumption. - /// If an error is returned it indicates a fatal/deterministic error which should not be retried. 
- async fn process_blocks( - &self, - cursor: FirehoseCursor, - mut stream: SubstreamsBlockStream, - ) -> Result { - let mut latest_cursor = cursor; - - while let Some(message) = stream.next().await { - let (block, cursor) = match message { - Ok(BlockStreamEvent::ProcessWasmBlock( - _block_ptr, - _block_time, - _data, - _handler, - _cursor, - )) => { - unreachable!("Block ingestor should never receive raw blocks"); - } - Ok(BlockStreamEvent::ProcessBlock(triggers, cursor)) => { - (Arc::new(triggers.block), cursor) - } - Ok(BlockStreamEvent::Revert(_ptr, _cursor)) => { - trace!(self.logger, "Received undo block to ingest, skipping"); - continue; - } - Err(e) if e.is_deterministic() => { - return Err(e); - } - Err(e) => { - info!( - self.logger, - "An error occurred while streaming blocks: {}", e - ); - break; - } - }; - - let res = self.process_new_block(block, cursor.to_string()).await; - if let Err(e) = res { - error!(self.logger, "Process block failed: {:#}", e); - break; - } - - latest_cursor = cursor - } - - error!( - self.logger, - "Stream blocks complete unexpectedly, expecting stream to always stream blocks" - ); - - Ok(latest_cursor) - } - - async fn process_new_block( - &self, - block: Arc, - cursor: String, - ) -> Result<(), Error> { - trace!(self.logger, "Received new block to ingest {:?}", block); - - self.chain_store - .clone() - .set_chain_head(block, cursor) - .await - .context("Updating chain head")?; - - Ok(()) - } -} - -#[async_trait] -impl BlockIngestor for SubstreamsBlockIngestor { - async fn run(self: Box) { - let mapper = Arc::new(Mapper { - schema: None, - skip_empty_blocks: false, - }); - let mut latest_cursor = FirehoseCursor::from(self.fetch_head_cursor().await); - let mut backoff = - ExponentialBackoff::new(Duration::from_millis(250), Duration::from_secs(30)); - let package = Package::decode(SUBSTREAMS_HEAD_TRACKER_BYTES.to_vec().as_ref()).unwrap(); - - loop { - let stream = SubstreamsBlockStream::::new( - DeploymentHash::default(), - self.client.cheap_clone(), - None, - latest_cursor.clone(), - mapper.cheap_clone(), - package.modules.clone().unwrap_or_default(), - "map_blocks".to_string(), - vec![-1], - vec![], - self.logger.cheap_clone(), - self.metrics.cheap_clone(), - ); - - // Consume the stream of blocks until an error is hit - // If the error is retryable it will print the error and return the cursor - // therefore if we get an error here it has to be a fatal error. - // This is a bit brittle and should probably be improved at some point. 
- let res = self.process_blocks(latest_cursor.clone(), stream).await; - match res { - Ok(cursor) => { - if cursor.as_ref() != latest_cursor.as_ref() { - backoff.reset(); - latest_cursor = cursor; - } - } - Err(BlockStreamError::Fatal(e)) => { - error!( - self.logger, - "fatal error while ingesting substream blocks: {}", e - ); - return; - } - _ => unreachable!("Nobody should ever see this error message, something is wrong"), - } - - // If we reach this point, we must wait a bit before retrying - backoff.sleep_async().await; - } - } - - fn network_name(&self) -> ChainName { - self.chain_name.clone() - } - fn kind(&self) -> BlockchainKind { - BlockchainKind::Substreams - } -} diff --git a/chain/substreams/src/block_stream.rs b/chain/substreams/src/block_stream.rs deleted file mode 100644 index daad94bae20..00000000000 --- a/chain/substreams/src/block_stream.rs +++ /dev/null @@ -1,115 +0,0 @@ -use anyhow::Result; -use async_trait::async_trait; -use std::sync::Arc; - -use graph::{ - blockchain::{ - block_stream::{ - BlockStream, BlockStreamBuilder as BlockStreamBuilderTrait, FirehoseCursor, - }, - substreams_block_stream::SubstreamsBlockStream, - Blockchain, TriggerFilterWrapper, - }, - components::store::{DeploymentLocator, SourceableStore}, - data::subgraph::UnifiedMappingApiVersion, - prelude::{BlockNumber, BlockPtr}, - schema::InputSchema, - slog::o, -}; - -use crate::{ - mapper::{Mapper, WasmBlockMapper}, - Chain, TriggerFilter, -}; - -pub struct BlockStreamBuilder {} - -impl BlockStreamBuilder { - pub fn new() -> Self { - Self {} - } -} - -#[async_trait] -/// Substreams doesn't actually use Firehose, the configuration for firehose and the grpc substream -/// is very similar, so we can re-use the configuration and the builder for it. -/// This is probably something to improve but for now it works. 
-impl BlockStreamBuilderTrait for BlockStreamBuilder { - async fn build_substreams( - &self, - chain: &Chain, - schema: InputSchema, - deployment: DeploymentLocator, - block_cursor: FirehoseCursor, - subgraph_current_block: Option, - filter: Arc<::TriggerFilter>, - ) -> Result>> { - let logger = chain - .logger_factory - .subgraph_logger(&deployment) - .new(o!("component" => "SubstreamsBlockStream")); - - let stream = match &filter.mapping_handler { - Some(handler) => SubstreamsBlockStream::new( - deployment.hash, - chain.chain_client(), - subgraph_current_block, - block_cursor.clone(), - Arc::new(WasmBlockMapper { - handler: handler.clone(), - }), - filter.modules.clone().unwrap_or_default(), - filter.module_name.clone(), - filter.start_block.map(|x| vec![x]).unwrap_or_default(), - vec![], - logger, - chain.metrics_registry.clone(), - ), - - None => SubstreamsBlockStream::new( - deployment.hash, - chain.chain_client(), - subgraph_current_block, - block_cursor.clone(), - Arc::new(Mapper { - schema: Some(schema), - skip_empty_blocks: true, - }), - filter.modules.clone().unwrap_or_default(), - filter.module_name.clone(), - filter.start_block.map(|x| vec![x]).unwrap_or_default(), - vec![], - logger, - chain.metrics_registry.clone(), - ), - }; - - Ok(Box::new(stream)) - } - - async fn build_firehose( - &self, - _chain: &Chain, - _deployment: DeploymentLocator, - _block_cursor: FirehoseCursor, - _start_blocks: Vec, - _subgraph_current_block: Option, - _filter: Arc, - _unified_api_version: UnifiedMappingApiVersion, - ) -> Result>> { - unimplemented!() - } - - async fn build_polling( - &self, - _chain: &Chain, - _deployment: DeploymentLocator, - _start_blocks: Vec, - _source_subgraph_stores: Vec>, - _subgraph_current_block: Option, - _filter: Arc>, - _unified_api_version: UnifiedMappingApiVersion, - ) -> Result>> { - unimplemented!("polling block stream is not supported for substreams") - } -} diff --git a/chain/substreams/src/chain.rs b/chain/substreams/src/chain.rs deleted file mode 100644 index 0213d01a39e..00000000000 --- a/chain/substreams/src/chain.rs +++ /dev/null @@ -1,231 +0,0 @@ -use crate::block_ingestor::SubstreamsBlockIngestor; -use crate::{data_source::*, EntityChanges, TriggerData, TriggerFilter, TriggersAdapter}; -use anyhow::Error; -use async_trait::async_trait; -use graph::blockchain::client::ChainClient; -use graph::blockchain::{ - BasicBlockchainBuilder, BlockIngestor, BlockTime, EmptyNodeCapabilities, NoopDecoderHook, - NoopRuntimeAdapter, TriggerFilterWrapper, -}; -use graph::components::network_provider::ChainName; -use graph::components::store::{ChainHeadStore, DeploymentCursorTracker, SourceableStore}; -use graph::env::EnvVars; -use graph::prelude::{BlockHash, CheapClone, Entity, LoggerFactory, MetricsRegistry}; -use graph::schema::EntityKey; -use graph::{ - blockchain::{ - self, - block_stream::{BlockStream, BlockStreamBuilder, FirehoseCursor}, - BlockPtr, Blockchain, BlockchainKind, IngestorError, RuntimeAdapter as RuntimeAdapterTrait, - }, - components::store::DeploymentLocator, - data::subgraph::UnifiedMappingApiVersion, - prelude::BlockNumber, - slog::Logger, -}; - -use std::sync::Arc; - -// ParsedChanges are an internal representation of the equivalent operations defined on the -// graph-out format used by substreams. -// Unset serves as a sentinel value; if for some reason an unknown value is sent or the value -// was empty, it's probably unintended behaviour. 
This code was moved here for performance -// reasons, but the validation is still performed during trigger processing so while Unset will -// very likely just indicate an error somewhere, as far as the stream is concerned we just pass -// that along and let the downstream components deal with it. -#[derive(Debug, Clone)] -pub enum ParsedChanges { - Unset, - Delete(EntityKey), - Upsert { key: EntityKey, entity: Entity }, -} - -#[derive(Default, Debug, Clone)] -pub struct Block { - pub hash: BlockHash, - pub number: BlockNumber, - pub changes: EntityChanges, - pub parsed_changes: Vec, -} - -impl blockchain::Block for Block { - fn ptr(&self) -> BlockPtr { - BlockPtr { - hash: self.hash.clone(), - number: self.number, - } - } - - fn parent_ptr(&self) -> Option { - None - } - - fn timestamp(&self) -> BlockTime { - BlockTime::NONE - } -} - -pub struct Chain { - chain_head_store: Arc, - block_stream_builder: Arc>, - chain_id: ChainName, - - pub(crate) logger_factory: LoggerFactory, - pub(crate) client: Arc>, - pub(crate) metrics_registry: Arc, -} - -impl Chain { - pub fn new( - logger_factory: LoggerFactory, - chain_client: Arc>, - metrics_registry: Arc, - chain_store: Arc, - block_stream_builder: Arc>, - chain_id: ChainName, - ) -> Self { - Self { - logger_factory, - client: chain_client, - metrics_registry, - chain_head_store: chain_store, - block_stream_builder, - chain_id, - } - } -} - -impl std::fmt::Debug for Chain { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "chain: substreams") - } -} - -#[async_trait] -impl Blockchain for Chain { - const KIND: BlockchainKind = BlockchainKind::Substreams; - - type Client = (); - type Block = Block; - type DataSource = DataSource; - type UnresolvedDataSource = UnresolvedDataSource; - - type DataSourceTemplate = NoopDataSourceTemplate; - type UnresolvedDataSourceTemplate = NoopDataSourceTemplate; - - /// Trigger data as parsed from the triggers adapter. - type TriggerData = TriggerData; - - /// Decoded trigger ready to be processed by the mapping. - /// New implementations should have this be the same as `TriggerData`. - type MappingTrigger = TriggerData; - - /// Trigger filter used as input to the triggers adapter. - type TriggerFilter = TriggerFilter; - - type NodeCapabilities = EmptyNodeCapabilities; - - type DecoderHook = NoopDecoderHook; - - fn triggers_adapter( - &self, - _log: &DeploymentLocator, - _capabilities: &Self::NodeCapabilities, - _unified_api_version: UnifiedMappingApiVersion, - ) -> Result>, Error> { - Ok(Arc::new(TriggersAdapter {})) - } - - async fn new_block_stream( - &self, - deployment: DeploymentLocator, - store: impl DeploymentCursorTracker, - _start_blocks: Vec, - _source_subgraph_stores: Vec>, - filter: Arc>, - _unified_api_version: UnifiedMappingApiVersion, - ) -> Result>, Error> { - self.block_stream_builder - .build_substreams( - self, - store.input_schema(), - deployment, - store.firehose_cursor(), - store.block_ptr(), - filter.chain_filter.clone(), - ) - .await - } - - fn is_refetch_block_required(&self) -> bool { - false - } - async fn refetch_firehose_block( - &self, - _logger: &Logger, - _cursor: FirehoseCursor, - ) -> Result { - unimplemented!("This chain does not support Dynamic Data Sources. 
is_refetch_block_required always returns false, so this should never be called.") - } - - async fn chain_head_ptr(&self) -> Result, Error> { - self.chain_head_store.cheap_clone().chain_head_ptr().await - } - - async fn block_pointer_from_number( - &self, - _logger: &Logger, - number: BlockNumber, - ) -> Result { - // This is the same thing TriggersAdapter does; it's not certain this will work, but - // we also don't yet have a good way of getting this value until we sort out the - // chain store. - // TODO(filipe): Fix this once the chain_store is correctly set up for substreams. - Ok(BlockPtr { - hash: BlockHash::from(vec![0xff; 32]), - number, - }) - } - async fn runtime( - &self, - ) -> anyhow::Result<(Arc>, Self::DecoderHook)> { - Ok((Arc::new(NoopRuntimeAdapter::default()), NoopDecoderHook)) - } - - fn chain_client(&self) -> Arc> { - self.client.clone() - } - - async fn block_ingestor(&self) -> anyhow::Result> { - Ok(Box::new(SubstreamsBlockIngestor::new( - self.chain_head_store.cheap_clone(), - self.client.cheap_clone(), - self.logger_factory - .component_logger("SubstreamsBlockIngestor", None), - self.chain_id.clone(), - self.metrics_registry.cheap_clone(), - ))) - } -} - -#[async_trait] -impl blockchain::BlockchainBuilder for BasicBlockchainBuilder { - async fn build(self, _config: &Arc) -> Chain { - let BasicBlockchainBuilder { - logger_factory, - name, - chain_head_store, - firehose_endpoints, - metrics_registry, - } = self; - - Chain { - chain_head_store, - block_stream_builder: Arc::new(crate::BlockStreamBuilder::new()), - logger_factory, - client: Arc::new(ChainClient::new_firehose(firehose_endpoints)), - metrics_registry, - chain_id: name, - } - } -} diff --git a/chain/substreams/src/codec.rs b/chain/substreams/src/codec.rs deleted file mode 100644 index 31781baa201..00000000000 --- a/chain/substreams/src/codec.rs +++ /dev/null @@ -1,5 +0,0 @@ -#[rustfmt::skip] -#[path = "protobuf/substreams.entity.v1.rs"] -mod pbsubstreamsentity; - -pub use pbsubstreamsentity::*; diff --git a/chain/substreams/src/data_source.rs b/chain/substreams/src/data_source.rs deleted file mode 100644 index a30d92173c5..00000000000 --- a/chain/substreams/src/data_source.rs +++ /dev/null @@ -1,765 +0,0 @@ -use std::{collections::HashSet, sync::Arc}; - -use anyhow::{anyhow, Context, Error}; -use async_trait::async_trait; -use graph::{ - blockchain, - cheap_clone::CheapClone, - components::{ - link_resolver::{LinkResolver, LinkResolverContext}, - subgraph::InstanceDSTemplateInfo, - }, - data::subgraph::DeploymentHash, - prelude::{BlockNumber, Link}, - slog::Logger, -}; - -use prost::Message; -use serde::Deserialize; - -use crate::{chain::Chain, Block, TriggerData}; - -pub const SUBSTREAMS_KIND: &str = "substreams"; - -const DYNAMIC_DATA_SOURCE_ERROR: &str = "Substreams do not support dynamic data sources"; -const TEMPLATE_ERROR: &str = "Substreams do not support templates"; - -const ALLOWED_MAPPING_KIND: [&str; 1] = ["substreams/graph-entities"]; -const SUBSTREAMS_HANDLER_KIND: &str = "substreams"; -#[derive(Clone, Debug, PartialEq)] -/// Represents the DataSource portion of the manifest once it has been parsed -/// and the substream spkg has been downloaded + parsed. 
-pub struct DataSource { - pub kind: String, - pub network: Option, - pub name: String, - pub(crate) source: Source, - pub mapping: Mapping, - pub context: Arc>, - pub initial_block: Option, -} - -impl blockchain::DataSource for DataSource { - fn from_template_info( - _info: InstanceDSTemplateInfo, - _template: &graph::data_source::DataSourceTemplate, - ) -> Result { - Err(anyhow!("Substreams does not support templates")) - } - - fn address(&self) -> Option<&[u8]> { - None - } - - fn start_block(&self) -> BlockNumber { - self.initial_block.unwrap_or(0) - } - - fn end_block(&self) -> Option { - None - } - - fn name(&self) -> &str { - &self.name - } - - fn kind(&self) -> &str { - &self.kind - } - - fn network(&self) -> Option<&str> { - self.network.as_deref() - } - - fn context(&self) -> Arc> { - self.context.cheap_clone() - } - - fn creation_block(&self) -> Option { - None - } - - fn api_version(&self) -> semver::Version { - self.mapping.api_version.clone() - } - - fn runtime(&self) -> Option>> { - self.mapping.handler.as_ref().map(|h| h.runtime.clone()) - } - - fn handler_kinds(&self) -> HashSet<&str> { - // This is placeholder, substreams do not have a handler kind. - vec![SUBSTREAMS_HANDLER_KIND].into_iter().collect() - } - - // match_and_decode only seems to be used on the default trigger processor which substreams - // bypasses so it should be fine to leave it unimplemented. - fn match_and_decode( - &self, - _trigger: &TriggerData, - _block: &Arc, - _logger: &Logger, - ) -> Result>, Error> { - unimplemented!() - } - - fn is_duplicate_of(&self, _other: &Self) -> bool { - self == _other - } - - fn as_stored_dynamic_data_source(&self) -> graph::components::store::StoredDynamicDataSource { - unimplemented!("{}", DYNAMIC_DATA_SOURCE_ERROR) - } - - fn validate(&self, _: &semver::Version) -> Vec { - let mut errs = vec![]; - - if &self.kind != SUBSTREAMS_KIND { - errs.push(anyhow!( - "data source has invalid `kind`, expected {} but found {}", - SUBSTREAMS_KIND, - self.kind - )) - } - - if self.name.is_empty() { - errs.push(anyhow!("name cannot be empty")); - } - - if !ALLOWED_MAPPING_KIND.contains(&self.mapping.kind.as_str()) { - errs.push(anyhow!( - "mapping kind has to be one of {:?}, found {}", - ALLOWED_MAPPING_KIND, - self.mapping.kind - )) - } - - errs - } - - fn from_stored_dynamic_data_source( - _template: &::DataSourceTemplate, - _stored: graph::components::store::StoredDynamicDataSource, - ) -> Result { - Err(anyhow!(DYNAMIC_DATA_SOURCE_ERROR)) - } -} - -#[derive(Clone, Debug, Default, PartialEq)] -/// Module name comes from the manifest, package is the parsed spkg file. -pub struct Source { - pub module_name: String, - pub package: graph::substreams::Package, -} - -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct Mapping { - pub api_version: semver::Version, - pub kind: String, - pub handler: Option, -} - -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct MappingHandler { - pub handler: String, - pub runtime: Arc>, -} - -#[derive(Clone, Debug, Deserialize, PartialEq, Eq)] -/// Raw representation of the data source for deserialization purposes. -pub struct UnresolvedDataSource { - pub kind: String, - pub network: Option, - pub name: String, - pub(crate) source: UnresolvedSource, - pub mapping: UnresolvedMapping, -} - -#[derive(Clone, Debug, Default, Hash, Eq, PartialEq, Deserialize)] -#[serde(rename_all = "camelCase")] -/// Text api_version, before parsing and validation. 
-pub struct UnresolvedMapping { - pub api_version: String, - pub kind: String, - pub handler: Option, - pub file: Option, -} - -#[async_trait] -impl blockchain::UnresolvedDataSource for UnresolvedDataSource { - async fn resolve( - self, - deployment_hash: &DeploymentHash, - resolver: &Arc, - logger: &Logger, - _manifest_idx: u32, - _spec_version: &semver::Version, - ) -> Result { - let content = resolver - .cat( - &LinkResolverContext::new(deployment_hash, logger), - &self.source.package.file, - ) - .await?; - - let mut package = graph::substreams::Package::decode(content.as_ref())?; - - let module = match package.modules.as_mut() { - Some(modules) => modules - .modules - .iter_mut() - .find(|module| module.name == self.source.package.module_name) - .map(|module| { - if let Some(params) = self.source.package.params { - graph::substreams::patch_module_params(params, module); - } - module - }), - None => None, - }; - - let initial_block: Option = match module { - Some(module) => match &module.kind { - Some(graph::substreams::module::Kind::KindMap(_)) => Some(module.initial_block), - _ => { - return Err(anyhow!( - "Substreams module {} must be of 'map' kind", - module.name - )) - } - }, - None => { - return Err(anyhow!( - "Substreams module {} does not exist", - self.source.package.module_name - )) - } - }; - - let initial_block = - initial_block.map(|x| x.max(self.source.start_block.unwrap_or_default() as u64)); - - let initial_block: Option = initial_block - .map_or(Ok(None), |x: u64| TryInto::::try_into(x).map(Some)) - .map_err(anyhow::Error::from)?; - - let handler = match (self.mapping.handler, self.mapping.file) { - (Some(handler), Some(file)) => { - let module_bytes = resolver - .cat(&LinkResolverContext::new(deployment_hash, logger), &file) - .await - .with_context(|| format!("failed to resolve mapping {}", file.link))?; - - Some(MappingHandler { - handler, - runtime: Arc::new(module_bytes), - }) - } - _ => None, - }; - - Ok(DataSource { - kind: SUBSTREAMS_KIND.into(), - network: self.network, - name: self.name, - source: Source { - module_name: self.source.package.module_name, - package, - }, - mapping: Mapping { - api_version: semver::Version::parse(&self.mapping.api_version)?, - kind: self.mapping.kind, - handler, - }, - context: Arc::new(None), - initial_block, - }) - } -} - -#[derive(Clone, Debug, Default, Hash, Eq, PartialEq, Deserialize)] -#[serde(rename_all = "camelCase")] -/// Source is a part of the manifest and this is needed for parsing. -pub struct UnresolvedSource { - #[serde(rename = "startBlock", default)] - start_block: Option, - package: UnresolvedPackage, -} - -#[derive(Clone, Debug, Default, Hash, Eq, PartialEq, Deserialize)] -#[serde(rename_all = "camelCase")] -/// The unresolved Package section of the manifest. -pub struct UnresolvedPackage { - pub module_name: String, - pub file: Link, - pub params: Option, -} - -#[derive(Debug, Clone, Default, Deserialize)] -/// This is necessary for the Blockchain trait associated types, substreams do not support -/// data source templates so this is a noop and is not expected to be called. 
-pub struct NoopDataSourceTemplate {} - -impl blockchain::DataSourceTemplate for NoopDataSourceTemplate { - fn name(&self) -> &str { - unimplemented!("{}", TEMPLATE_ERROR); - } - - fn api_version(&self) -> semver::Version { - unimplemented!("{}", TEMPLATE_ERROR); - } - - fn runtime(&self) -> Option>> { - unimplemented!("{}", TEMPLATE_ERROR); - } - - fn manifest_idx(&self) -> u32 { - todo!() - } - - fn kind(&self) -> &str { - unimplemented!("{}", TEMPLATE_ERROR); - } -} - -#[async_trait] -impl blockchain::UnresolvedDataSourceTemplate for NoopDataSourceTemplate { - async fn resolve( - self, - _deployment_hash: &DeploymentHash, - _resolver: &Arc, - _logger: &Logger, - _manifest_idx: u32, - _spec_version: &semver::Version, - ) -> Result { - unimplemented!("{}", TEMPLATE_ERROR) - } -} - -#[cfg(test)] -mod test { - use std::{str::FromStr, sync::Arc}; - - use anyhow::Error; - use async_trait::async_trait; - use graph::{ - blockchain::{DataSource as _, UnresolvedDataSource as _}, - components::link_resolver::{LinkResolver, LinkResolverContext}, - data::subgraph::{DeploymentHash, LATEST_VERSION, SPEC_VERSION_1_2_0}, - prelude::{serde_yaml, JsonValueStream, Link}, - slog::{o, Discard, Logger}, - substreams::{ - module::{ - input::{Input, Params}, - Kind, KindMap, KindStore, - }, - Module, Modules, Package, - }, - }; - use prost::Message; - - use crate::{DataSource, Mapping, UnresolvedDataSource, UnresolvedMapping, SUBSTREAMS_KIND}; - - #[test] - fn parse_data_source() { - let ds: UnresolvedDataSource = serde_yaml::from_str(TEMPLATE_DATA_SOURCE).unwrap(); - let expected = UnresolvedDataSource { - kind: SUBSTREAMS_KIND.into(), - network: Some("mainnet".into()), - name: "Uniswap".into(), - source: crate::UnresolvedSource { - package: crate::UnresolvedPackage { - module_name: "output".into(), - file: Link { - link: "/ipfs/QmbHnhUFZa6qqqRyubUYhXntox1TCBxqryaBM1iNGqVJzT".into(), - }, - params: None, - }, - start_block: None, - }, - mapping: UnresolvedMapping { - api_version: "0.0.7".into(), - kind: "substreams/graph-entities".into(), - handler: None, - file: None, - }, - }; - assert_eq!(ds, expected); - } - - #[test] - fn parse_data_source_with_startblock() { - let ds: UnresolvedDataSource = - serde_yaml::from_str(TEMPLATE_DATA_SOURCE_WITH_START_BLOCK).unwrap(); - let expected = UnresolvedDataSource { - kind: SUBSTREAMS_KIND.into(), - network: Some("mainnet".into()), - name: "Uniswap".into(), - source: crate::UnresolvedSource { - package: crate::UnresolvedPackage { - module_name: "output".into(), - file: Link { - link: "/ipfs/QmbHnhUFZa6qqqRyubUYhXntox1TCBxqryaBM1iNGqVJzT".into(), - }, - params: None, - }, - start_block: Some(567), - }, - mapping: UnresolvedMapping { - api_version: "0.0.7".into(), - kind: "substreams/graph-entities".into(), - handler: None, - file: None, - }, - }; - assert_eq!(ds, expected); - } - - #[test] - fn parse_data_source_with_params() { - let ds: UnresolvedDataSource = - serde_yaml::from_str(TEMPLATE_DATA_SOURCE_WITH_PARAMS).unwrap(); - let expected = UnresolvedDataSource { - kind: SUBSTREAMS_KIND.into(), - network: Some("mainnet".into()), - name: "Uniswap".into(), - source: crate::UnresolvedSource { - package: crate::UnresolvedPackage { - module_name: "output".into(), - file: Link { - link: "/ipfs/QmbHnhUFZa6qqqRyubUYhXntox1TCBxqryaBM1iNGqVJzT".into(), - }, - params: Some("x\ny\n123\n".into()), - }, - start_block: None, - }, - mapping: UnresolvedMapping { - api_version: "0.0.7".into(), - kind: "substreams/graph-entities".into(), - handler: None, - file: None, - }, - }; - 
assert_eq!(ds, expected); - } - - #[graph::test] - async fn data_source_conversion() { - let ds: UnresolvedDataSource = serde_yaml::from_str(TEMPLATE_DATA_SOURCE).unwrap(); - let link_resolver: Arc = Arc::new(NoopLinkResolver {}); - let logger = Logger::root(Discard, o!()); - let ds: DataSource = ds - .resolve( - &DeploymentHash::default(), - &link_resolver, - &logger, - 0, - &SPEC_VERSION_1_2_0, - ) - .await - .unwrap(); - let expected = DataSource { - kind: SUBSTREAMS_KIND.into(), - network: Some("mainnet".into()), - name: "Uniswap".into(), - source: crate::Source { - module_name: "output".into(), - package: gen_package(), - }, - mapping: Mapping { - api_version: semver::Version::from_str("0.0.7").unwrap(), - kind: "substreams/graph-entities".into(), - handler: None, - }, - context: Arc::new(None), - initial_block: Some(123), - }; - assert_eq!(ds, expected); - } - - #[graph::test] - async fn data_source_conversion_override_params() { - let mut package = gen_package(); - let mut modules = package.modules.unwrap(); - modules.modules.get_mut(0).map(|module| { - module.inputs = vec![graph::substreams::module::Input { - input: Some(Input::Params(Params { - value: "x\ny\n123\n".into(), - })), - }] - }); - package.modules = Some(modules); - - let ds: UnresolvedDataSource = - serde_yaml::from_str(TEMPLATE_DATA_SOURCE_WITH_PARAMS).unwrap(); - let link_resolver: Arc = Arc::new(NoopLinkResolver {}); - let logger = Logger::root(Discard, o!()); - let ds: DataSource = ds - .resolve( - &DeploymentHash::default(), - &link_resolver, - &logger, - 0, - &SPEC_VERSION_1_2_0, - ) - .await - .unwrap(); - let expected = DataSource { - kind: SUBSTREAMS_KIND.into(), - network: Some("mainnet".into()), - name: "Uniswap".into(), - source: crate::Source { - module_name: "output".into(), - package, - }, - mapping: Mapping { - api_version: semver::Version::from_str("0.0.7").unwrap(), - kind: "substreams/graph-entities".into(), - handler: None, - }, - context: Arc::new(None), - initial_block: Some(123), - }; - assert_eq!(ds, expected); - } - - #[test] - fn data_source_validation() { - let mut ds = gen_data_source(); - assert_eq!(true, ds.validate(LATEST_VERSION).is_empty()); - - ds.network = None; - assert_eq!(true, ds.validate(LATEST_VERSION).is_empty()); - - ds.kind = "asdasd".into(); - ds.name = "".into(); - ds.mapping.kind = "asdasd".into(); - let errs: Vec = ds - .validate(LATEST_VERSION) - .into_iter() - .map(|e| e.to_string()) - .collect(); - assert_eq!( - errs, - vec![ - "data source has invalid `kind`, expected substreams but found asdasd", - "name cannot be empty", - "mapping kind has to be one of [\"substreams/graph-entities\"], found asdasd" - ] - ); - } - - #[test] - fn parse_data_source_with_maping() { - let ds: UnresolvedDataSource = - serde_yaml::from_str(TEMPLATE_DATA_SOURCE_WITH_MAPPING).unwrap(); - - let expected = UnresolvedDataSource { - kind: SUBSTREAMS_KIND.into(), - network: Some("mainnet".into()), - name: "Uniswap".into(), - source: crate::UnresolvedSource { - package: crate::UnresolvedPackage { - module_name: "output".into(), - file: Link { - link: "/ipfs/QmbHnhUFZa6qqqRyubUYhXntox1TCBxqryaBM1iNGqVJzT".into(), - }, - params: Some("x\ny\n123\n".into()), - }, - start_block: None, - }, - mapping: UnresolvedMapping { - api_version: "0.0.7".into(), - kind: "substreams/graph-entities".into(), - handler: Some("bananas".to_string()), - file: Some(Link { - link: "./src/mappings.ts".to_string(), - }), - }, - }; - assert_eq!(ds, expected); - } - - fn gen_package() -> Package { - Package { - proto_files: 
vec![], - version: 0, - modules: Some(Modules { - modules: vec![ - Module { - name: "output".into(), - initial_block: 123, - binary_entrypoint: "output".into(), - binary_index: 0, - kind: Some(Kind::KindMap(KindMap { - output_type: "proto".into(), - })), - block_filter: None, - inputs: vec![], - output: None, - }, - Module { - name: "store_mod".into(), - initial_block: 0, - binary_entrypoint: "store_mod".into(), - binary_index: 0, - kind: Some(Kind::KindStore(KindStore { - update_policy: 1, - value_type: "proto1".into(), - })), - block_filter: None, - inputs: vec![], - output: None, - }, - Module { - name: "map_mod".into(), - initial_block: 123456, - binary_entrypoint: "other2".into(), - binary_index: 0, - kind: Some(Kind::KindMap(KindMap { - output_type: "proto2".into(), - })), - block_filter: None, - inputs: vec![], - output: None, - }, - ], - binaries: vec![], - }), - module_meta: vec![], - package_meta: vec![], - sink_config: None, - network: "".into(), - sink_module: "".into(), - } - } - - fn gen_data_source() -> DataSource { - DataSource { - kind: SUBSTREAMS_KIND.into(), - network: Some("mainnet".into()), - name: "Uniswap".into(), - source: crate::Source { - module_name: "".to_string(), - package: gen_package(), - }, - mapping: Mapping { - api_version: semver::Version::from_str("0.0.7").unwrap(), - kind: "substreams/graph-entities".into(), - handler: None, - }, - context: Arc::new(None), - initial_block: None, - } - } - - const TEMPLATE_DATA_SOURCE: &str = r#" - kind: substreams - name: Uniswap - network: mainnet - source: - package: - moduleName: output - file: - /: /ipfs/QmbHnhUFZa6qqqRyubUYhXntox1TCBxqryaBM1iNGqVJzT - # This IPFs path would be generated from a local path at deploy time - mapping: - kind: substreams/graph-entities - apiVersion: 0.0.7 - "#; - - const TEMPLATE_DATA_SOURCE_WITH_START_BLOCK: &str = r#" - kind: substreams - name: Uniswap - network: mainnet - source: - startBlock: 567 - package: - moduleName: output - file: - /: /ipfs/QmbHnhUFZa6qqqRyubUYhXntox1TCBxqryaBM1iNGqVJzT - # This IPFs path would be generated from a local path at deploy time - mapping: - kind: substreams/graph-entities - apiVersion: 0.0.7 - "#; - - const TEMPLATE_DATA_SOURCE_WITH_MAPPING: &str = r#" - kind: substreams - name: Uniswap - network: mainnet - source: - package: - moduleName: output - file: - /: /ipfs/QmbHnhUFZa6qqqRyubUYhXntox1TCBxqryaBM1iNGqVJzT - # This IPFs path would be generated from a local path at deploy time - params: | - x - y - 123 - mapping: - kind: substreams/graph-entities - apiVersion: 0.0.7 - file: - /: ./src/mappings.ts - handler: bananas - "#; - - const TEMPLATE_DATA_SOURCE_WITH_PARAMS: &str = r#" - kind: substreams - name: Uniswap - network: mainnet - source: - package: - moduleName: output - file: - /: /ipfs/QmbHnhUFZa6qqqRyubUYhXntox1TCBxqryaBM1iNGqVJzT - # This IPFs path would be generated from a local path at deploy time - params: | - x - y - 123 - mapping: - kind: substreams/graph-entities - apiVersion: 0.0.7 - "#; - - #[derive(Debug)] - struct NoopLinkResolver {} - - #[async_trait] - impl LinkResolver for NoopLinkResolver { - fn with_timeout(&self, _timeout: std::time::Duration) -> Box { - unimplemented!() - } - - fn with_retries(&self) -> Box { - unimplemented!() - } - - fn for_manifest(&self, _manifest_path: &str) -> Result, Error> { - unimplemented!() - } - - async fn cat(&self, _ctx: &LinkResolverContext, _link: &Link) -> Result, Error> { - Ok(gen_package().encode_to_vec()) - } - - async fn get_block( - &self, - _ctx: &LinkResolverContext, - _link: &Link, 
- ) -> Result, Error> { - unimplemented!() - } - - async fn json_stream( - &self, - _ctx: &LinkResolverContext, - _link: &Link, - ) -> Result { - unimplemented!() - } - } -} diff --git a/chain/substreams/src/lib.rs b/chain/substreams/src/lib.rs deleted file mode 100644 index 664ceab6d65..00000000000 --- a/chain/substreams/src/lib.rs +++ /dev/null @@ -1,17 +0,0 @@ -mod block_stream; -mod chain; -mod codec; -mod data_source; -mod trigger; - -pub mod block_ingestor; -pub mod mapper; - -pub use crate::chain::Chain; -pub use block_stream::BlockStreamBuilder; -pub use chain::*; -pub use codec::EntityChanges; -pub use data_source::*; -pub use trigger::*; - -pub use codec::Field; diff --git a/chain/substreams/src/mapper.rs b/chain/substreams/src/mapper.rs deleted file mode 100644 index 78788186795..00000000000 --- a/chain/substreams/src/mapper.rs +++ /dev/null @@ -1,415 +0,0 @@ -use std::collections::HashMap; -use std::str::FromStr; - -use crate::codec::{entity_change, EntityChanges}; -use anyhow::{anyhow, Error}; -use async_trait::async_trait; -use graph::blockchain::block_stream::{ - BlockStreamError, BlockStreamEvent, BlockStreamMapper, BlockWithTriggers, FirehoseCursor, - SubstreamsError, -}; -use graph::blockchain::BlockTime; -use graph::data::store::scalar::{Bytes, Timestamp}; -use graph::data::store::IdType; -use graph::data::value::Word; -use graph::data_source::CausalityRegion; -use graph::prelude::{BigDecimal, BlockPtr}; -use graph::prelude::{BigInt, BlockHash, BlockNumber, Logger, Value}; -use graph::schema::InputSchema; -use graph::slog::error; -use graph::substreams::Clock; -use prost::Message; - -use crate::{Block, Chain, ParsedChanges, TriggerData}; - -// WasmBlockMapper will not perform any transformation to the block and cannot make assumptions -// about the block format. This mode just works as a passthrough from the block stream to the subgraph -// mapping, which will do the decoding and store actions. -pub struct WasmBlockMapper { - pub handler: String, -} - -#[async_trait] -impl BlockStreamMapper for WasmBlockMapper { - fn decode_block( - &self, - _output: Option<&[u8]>, - ) -> Result, BlockStreamError> { - unreachable!("WasmBlockMapper does not do block decoding") - } - - async fn block_with_triggers( - &self, - _logger: &Logger, - _block: crate::Block, - ) -> Result, BlockStreamError> { - unreachable!("WasmBlockMapper does not do trigger decoding") - } - - async fn handle_substreams_block( - &self, - logger: &Logger, - clock: Clock, - cursor: FirehoseCursor, - block: Vec, - ) -> Result, BlockStreamError> { - let Clock { - id, - number, - timestamp, - } = clock; - - let block_ptr = BlockPtr { - hash: BlockHash::from(id.into_bytes()), - number: BlockNumber::from(TryInto::::try_into(number).map_err(Error::from)?), - }; - - let block_data = block.into_boxed_slice(); - - // `timestamp` is an `Option`, but it should always be set - let timestamp = match timestamp { - None => { - error!(logger, - "Substream block is missing a timestamp"; - "cursor" => cursor.to_string(), - "number" => number, - ); - return Err(anyhow!( - "Substream block is missing a timestamp at cursor {cursor}, block number {number}" - )).map_err(BlockStreamError::from); - } - Some(ts) => BlockTime::since_epoch(ts.seconds, ts.nanos as u32), - }; - - Ok(BlockStreamEvent::ProcessWasmBlock( - block_ptr, - timestamp, - block_data, - self.handler.clone(), - cursor, - )) - } -} - -// Mapper will transform the proto content coming from substreams in the graph-out format -// into the internal Block representation.
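The only real work in the passthrough above is turning the substreams `Clock` into graph-node's block pointer and timestamp. A minimal sketch of that conversion, using simplified local stand-ins for `BlockPtr` and `BlockTime` rather than the real graph types:

```rust
// Simplified stand-ins; the real types live in graph::prelude and graph::blockchain.
#[derive(Debug)]
struct BlockPtr {
    hash: Vec<u8>,
    number: i32,
}

#[derive(Debug)]
struct BlockTime {
    seconds: i64,
    nanos: u32,
}

// Mirrors the Clock -> (BlockPtr, BlockTime) conversion in the removed
// WasmBlockMapper::handle_substreams_block; a missing timestamp is an error.
fn convert(
    id: String,
    number: u64,
    timestamp: Option<(i64, i32)>,
) -> Result<(BlockPtr, BlockTime), String> {
    let number = i32::try_from(number).map_err(|e| e.to_string())?;
    let (seconds, nanos) = timestamp
        .ok_or_else(|| "substreams block is missing a timestamp".to_string())?;
    Ok((
        BlockPtr { hash: id.into_bytes(), number },
        BlockTime { seconds, nanos: nanos as u32 },
    ))
}

fn main() {
    let (ptr, time) = convert("0xabc".into(), 17_000_000, Some((1_700_000_000, 0))).unwrap();
    println!("{ptr:?} at {time:?}");
}
```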
If a schema is passed, an additional transformation -// from the substreams block representation into the Entity model used by -// the store is performed. If the schema is None, only the original block is passed. This None should only -// be used for block ingestion, where entity content is empty and gets discarded. -pub struct Mapper { - pub schema: Option, - // Block ingestors need the block to be returned so they can populate the cache. - // Block streams, however, can shave some time by just skipping. - pub skip_empty_blocks: bool, -} - -#[async_trait] -impl BlockStreamMapper for Mapper { - fn decode_block(&self, output: Option<&[u8]>) -> Result, BlockStreamError> { - let changes: EntityChanges = match output { - Some(msg) => Message::decode(msg).map_err(SubstreamsError::DecodingError)?, - None => EntityChanges { - entity_changes: [].to_vec(), - }, - }; - - let parsed_changes = match self.schema.as_ref() { - Some(schema) => parse_changes(&changes, schema)?, - None if self.skip_empty_blocks => return Ok(None), - None => vec![], - }; - - let hash = BlockHash::zero(); - let number = BlockNumber::MIN; - let block = Block { - hash, - number, - changes, - parsed_changes, - }; - - Ok(Some(block)) - } - - async fn block_with_triggers( - &self, - logger: &Logger, - block: Block, - ) -> Result, BlockStreamError> { - let mut triggers = vec![]; - if block.changes.entity_changes.len() >= 1 { - triggers.push(TriggerData {}); - } - - Ok(BlockWithTriggers::new(block, triggers, logger)) - } - - async fn handle_substreams_block( - &self, - logger: &Logger, - clock: Clock, - cursor: FirehoseCursor, - block: Vec, - ) -> Result, BlockStreamError> { - let block_number: BlockNumber = clock.number.try_into().map_err(Error::from)?; - let block_hash = clock.id.as_bytes().to_vec().into(); - - let block = self - .decode_block(Some(&block))? - .ok_or_else(|| anyhow!("expected block to not be empty"))?; - - let block = self.block_with_triggers(logger, block).await.map(|bt| { - let mut block = bt; - - block.block.number = block_number; - block.block.hash = block_hash; - block - })?; - - Ok(BlockStreamEvent::ProcessBlock(block, cursor)) - } -} - -fn parse_changes( - changes: &EntityChanges, - schema: &InputSchema, -) -> Result, SubstreamsError> { - let mut parsed_changes = vec![]; - for entity_change in changes.entity_changes.iter() { - let mut parsed_data: HashMap = HashMap::default(); - let entity_type = schema.entity_type(&entity_change.entity)?; - - // Make sure that the `entity_id` gets set to a value - // that is safe for roundtrips through the database. In - // particular, if the type of the id is `Bytes`, we have - // to make sure that the `entity_id` starts with `0x` as - // that will be what the key for such an entity has - // when it is read from the database. - // - // Needless to say, this is a very ugly hack, and the - // real fix is what's described in [this - // issue](https://github.com/graphprotocol/graph-node/issues/4663) - let entity_id: String = match entity_type.id_type()?
{ - IdType::String | IdType::Int8 => entity_change.id.clone(), - IdType::Bytes => { - if entity_change.id.starts_with("0x") { - entity_change.id.clone() - } else { - format!("0x{}", entity_change.id) - } - } - }; - // Substreams don't currently support offchain data - let key = entity_type.parse_key_in(Word::from(entity_id), CausalityRegion::ONCHAIN)?; - - let id = key.id_value(); - parsed_data.insert(Word::from("id"), id); - - let changes = match entity_change.operation() { - entity_change::Operation::Create | entity_change::Operation::Update => { - for field in entity_change.fields.iter() { - let new_value: &crate::codec::value::Typed = match &field.new_value { - Some(crate::codec::Value { - typed: Some(new_value), - }) => &new_value, - _ => continue, - }; - - let value: Value = decode_value(new_value)?; - *parsed_data - .entry(Word::from(field.name.as_str())) - .or_insert(Value::Null) = value; - } - let entity = schema.make_entity(parsed_data)?; - - ParsedChanges::Upsert { key, entity } - } - entity_change::Operation::Delete => ParsedChanges::Delete(key), - entity_change::Operation::Unset => ParsedChanges::Unset, - }; - parsed_changes.push(changes); - } - - Ok(parsed_changes) -} - -fn decode_value(value: &crate::codec::value::Typed) -> anyhow::Result { - use crate::codec::value::Typed; - use base64::prelude::*; - - match value { - Typed::Int32(new_value) => Ok(Value::Int(*new_value)), - - Typed::Bigdecimal(new_value) => BigDecimal::from_str(new_value) - .map(Value::BigDecimal) - .map_err(|err| anyhow::Error::from(err)), - - Typed::Bigint(new_value) => BigInt::from_str(new_value) - .map(Value::BigInt) - .map_err(|err| anyhow::Error::from(err)), - - Typed::String(new_value) => { - let mut string = new_value.clone(); - - // Strip null characters since they are not accepted by Postgres. 
- if string.contains('\u{0000}') { - string = string.replace('\u{0000}', ""); - } - Ok(Value::String(string)) - } - - Typed::Bytes(new_value) => BASE64_STANDARD - .decode(new_value) - .map(|bs| Value::Bytes(Bytes::from(bs))) - .map_err(|err| anyhow::Error::from(err)), - - Typed::Bool(new_value) => Ok(Value::Bool(*new_value)), - - Typed::Timestamp(new_value) => Timestamp::from_microseconds_since_epoch(*new_value) - .map(Value::Timestamp) - .map_err(|err| anyhow::Error::from(err)), - - Typed::Array(arr) => arr - .value - .iter() - .filter_map(|item| item.typed.as_ref().map(decode_value)) - .collect::>>() - .map(Value::List), - } -} - -#[cfg(test)] -mod test { - use std::{ops::Add, str::FromStr}; - - use super::decode_value; - use crate::codec::value::Typed; - use crate::codec::{Array, Value}; - use base64::prelude::*; - use graph::{ - data::store::scalar::{Bytes, Timestamp}, - prelude::{BigDecimal, BigInt, Value as GraphValue}, - }; - - #[test] - fn validate_substreams_field_types() { - struct Case { - name: String, - value: Value, - expected_value: GraphValue, - } - - let cases = vec![ - Case { - name: "string value".to_string(), - value: Value { - typed: Some(Typed::String( - "d4325ee72c39999e778a9908f5fb0803f78e30c441a5f2ce5c65eee0e0eba59d" - .to_string(), - )), - }, - expected_value: GraphValue::String( - "d4325ee72c39999e778a9908f5fb0803f78e30c441a5f2ce5c65eee0e0eba59d".to_string(), - ), - }, - Case { - name: "bytes value".to_string(), - value: Value { - typed: Some(Typed::Bytes( - BASE64_STANDARD.encode( - hex::decode( - "445247fe150195bd866516594e087e1728294aa831613f4d48b8ec618908519f", - ) - .unwrap(), - ) - .into_bytes(), - )), - }, - expected_value: GraphValue::Bytes( - Bytes::from_str( - "0x445247fe150195bd866516594e087e1728294aa831613f4d48b8ec618908519f", - ) - .unwrap(), - ), - }, - Case { - name: "int value for block".to_string(), - value: Value { - typed: Some(Typed::Int32(12369760)), - }, - expected_value: GraphValue::Int(12369760), - }, - Case { - name: "negative int value".to_string(), - value: Value { - typed: Some(Typed::Int32(-12369760)), - }, - expected_value: GraphValue::Int(-12369760), - }, - Case { - name: "big int".to_string(), - value: Value { - typed: Some(Typed::Bigint("123".to_string())), - }, - expected_value: GraphValue::BigInt(BigInt::from(123u64)), - }, - Case { - name: "big int > u64".to_string(), - value: Value { - typed: Some(Typed::Bigint( - BigInt::from(u64::MAX).add(BigInt::from(1)).to_string(), - )), - }, - expected_value: GraphValue::BigInt(BigInt::from(u64::MAX).add(BigInt::from(1))), - }, - Case { - name: "big decimal value".to_string(), - value: Value { - typed: Some(Typed::Bigdecimal("3133363633312e35".to_string())), - }, - expected_value: GraphValue::BigDecimal(BigDecimal::new( - BigInt::from(3133363633312u64), - 35, - )), - }, - Case { - name: "bool value".to_string(), - value: Value { - typed: Some(Typed::Bool(true)), - }, - expected_value: GraphValue::Bool(true), - }, - Case { - name: "timestamp value".to_string(), - value: Value { - typed: Some(Typed::Timestamp(1234565789)), - }, - expected_value: GraphValue::Timestamp(Timestamp::from_microseconds_since_epoch(1234565789).unwrap()), - }, - Case { - name: "string array".to_string(), - value: Value { - typed: Some(Typed::Array(Array { - value: vec![ - Value { - typed: Some(Typed::String("1".to_string())), - }, - Value { - typed: Some(Typed::String("2".to_string())), - }, - Value { - typed: Some(Typed::String("3".to_string())), - }, - ], - })), - }, - expected_value: 
GraphValue::List(vec!["1".into(), "2".into(), "3".into()]), - }, - ]; - - for case in cases.into_iter() { - let value: GraphValue = decode_value(&case.value.typed.unwrap()).unwrap(); - assert_eq!(case.expected_value, value, "failed case: {}", case.name) - } - } -} diff --git a/chain/substreams/src/protobuf/substreams.entity.v1.rs b/chain/substreams/src/protobuf/substreams.entity.v1.rs deleted file mode 100644 index 4077f281ad7..00000000000 --- a/chain/substreams/src/protobuf/substreams.entity.v1.rs +++ /dev/null @@ -1,107 +0,0 @@ -// This file is @generated by prost-build. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct EntityChanges { - #[prost(message, repeated, tag = "5")] - pub entity_changes: ::prost::alloc::vec::Vec, -} -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct EntityChange { - #[prost(string, tag = "1")] - pub entity: ::prost::alloc::string::String, - #[prost(string, tag = "2")] - pub id: ::prost::alloc::string::String, - #[prost(uint64, tag = "3")] - pub ordinal: u64, - #[prost(enumeration = "entity_change::Operation", tag = "4")] - pub operation: i32, - #[prost(message, repeated, tag = "5")] - pub fields: ::prost::alloc::vec::Vec, -} -/// Nested message and enum types in `EntityChange`. -pub mod entity_change { - #[derive( - Clone, - Copy, - Debug, - PartialEq, - Eq, - Hash, - PartialOrd, - Ord, - ::prost::Enumeration - )] - #[repr(i32)] - pub enum Operation { - /// Protobuf default should not be used; this is used so that the consumer can ensure that the value was actually specified - Unset = 0, - Create = 1, - Update = 2, - Delete = 3, - } - impl Operation { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. - pub fn as_str_name(&self) -> &'static str { - match self { - Self::Unset => "UNSET", - Self::Create => "CREATE", - Self::Update => "UPDATE", - Self::Delete => "DELETE", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. - pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "UNSET" => Some(Self::Unset), - "CREATE" => Some(Self::Create), - "UPDATE" => Some(Self::Update), - "DELETE" => Some(Self::Delete), - _ => None, - } - } - } -} -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct Value { - #[prost(oneof = "value::Typed", tags = "1, 2, 3, 4, 5, 6, 7, 10")] - pub typed: ::core::option::Option, -} -/// Nested message and enum types in `Value`.
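The `bytes value` test case above hinges on a double encoding: the fixture hex-decodes a hash and then base64-encodes it, because the substreams `Value::Bytes` payload carries base64 text that `decode_value` turns back into graph-node `Bytes`. A sketch of that round trip, assuming the `hex` and `base64` crates already used in this workspace:

```rust
use base64::prelude::*;

fn main() {
    // Raw 32-byte value from the test fixture above.
    let raw = hex::decode("445247fe150195bd866516594e087e1728294aa831613f4d48b8ec618908519f")
        .unwrap();
    // On the wire, a substreams Value::Bytes carries base64 text.
    let wire = BASE64_STANDARD.encode(&raw);
    // decode_value reverses the base64 step and wraps the result as Bytes.
    let decoded = BASE64_STANDARD.decode(wire).unwrap();
    assert_eq!(raw, decoded);
    // The store-facing form is the 0x-prefixed hex string.
    println!("0x{}", hex::encode(&decoded));
}
```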
-pub mod value { - #[derive(Clone, PartialEq, ::prost::Oneof)] - pub enum Typed { - #[prost(int32, tag = "1")] - Int32(i32), - #[prost(string, tag = "2")] - Bigdecimal(::prost::alloc::string::String), - #[prost(string, tag = "3")] - Bigint(::prost::alloc::string::String), - #[prost(string, tag = "4")] - String(::prost::alloc::string::String), - #[prost(bytes, tag = "5")] - Bytes(::prost::alloc::vec::Vec), - #[prost(bool, tag = "6")] - Bool(bool), - /// reserved 8 to 9; // For future types - #[prost(int64, tag = "7")] - Timestamp(i64), - #[prost(message, tag = "10")] - Array(super::Array), - } -} -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct Array { - #[prost(message, repeated, tag = "1")] - pub value: ::prost::alloc::vec::Vec, -} -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct Field { - #[prost(string, tag = "1")] - pub name: ::prost::alloc::string::String, - #[prost(message, optional, tag = "3")] - pub new_value: ::core::option::Option, - #[prost(message, optional, tag = "5")] - pub old_value: ::core::option::Option, -} diff --git a/chain/substreams/src/trigger.rs b/chain/substreams/src/trigger.rs deleted file mode 100644 index 6593a079970..00000000000 --- a/chain/substreams/src/trigger.rs +++ /dev/null @@ -1,255 +0,0 @@ -use anyhow::Error; -use async_trait::async_trait; -use graph::{ - blockchain::{ - self, block_stream::BlockWithTriggers, BlockPtr, EmptyNodeCapabilities, MappingTriggerTrait, - }, - components::{ - store::{DeploymentLocator, SubgraphFork}, - subgraph::{MappingError, ProofOfIndexingEvent, SharedProofOfIndexing}, - trigger_processor::HostedTrigger, - }, - prelude::{anyhow, BlockHash, BlockNumber, BlockState, CheapClone, RuntimeHostBuilder}, - slog::Logger, - substreams::Modules, -}; -use graph_runtime_wasm::module::ToAscPtr; -use std::{collections::BTreeSet, sync::Arc}; - -use crate::{Block, Chain, NoopDataSourceTemplate, ParsedChanges}; - -#[derive(Eq, PartialEq, PartialOrd, Ord, Debug)] -pub struct TriggerData {} - -impl MappingTriggerTrait for TriggerData { - fn error_context(&self) -> String { - "Failed to process substreams block".to_string() - } -} - -impl blockchain::TriggerData for TriggerData { - // TODO(filipe): Can this be improved with some data from the block? - fn error_context(&self) -> String { - "Failed to process substreams block".to_string() - } - - fn address_match(&self) -> Option<&[u8]> { - None - } -} - -#[async_trait] -impl ToAscPtr for TriggerData { - // Substreams doesn't rely on wasm on graph-node, so this is not needed. - async fn to_asc_ptr( - self, - _heap: &mut H, - _gas: &graph::runtime::gas::GasCounter, - ) -> Result, graph::runtime::HostExportError> { - unimplemented!() - } -} - -#[derive(Debug, Clone, Default)] -pub struct TriggerFilter { - pub(crate) modules: Option, - pub(crate) module_name: String, - pub(crate) start_block: Option, - pub(crate) data_sources_len: u8, - // The handler to call for subgraph mappings; if this is set, the binary block content - // should be passed to the mappings. - pub(crate) mapping_handler: Option, -} - -#[cfg(debug_assertions)] -impl TriggerFilter { - pub fn modules(&self) -> &Option { - &self.modules - } - - pub fn module_name(&self) -> &str { - &self.module_name - } - - pub fn start_block(&self) -> &Option { - &self.start_block - } - - pub fn data_sources_len(&self) -> u8 { - self.data_sources_len - } -} - -// TriggerFilter should bypass all triggers and just rely on the block, since all the data received -// should already have been processed.
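The `extend` implementation in the hunk below captures only the first data source and silently ignores the rest. A stripped-down illustration of that guard, with a plain string standing in for the real `DataSource`:

```rust
#[derive(Default)]
struct Filter {
    module_name: String,
    data_sources_len: u8,
}

impl Filter {
    // Mirrors the removed TriggerFilter::extend: once one data source has
    // been captured, later ones are ignored.
    fn extend<'a>(&mut self, mut data_sources: impl Iterator<Item = &'a str>) {
        if self.data_sources_len >= 1 {
            return;
        }
        if let Some(name) = data_sources.next() {
            self.data_sources_len = 1;
            self.module_name = name.to_string();
        }
    }
}

fn main() {
    let mut filter = Filter::default();
    filter.extend(["output", "ignored"].into_iter());
    assert_eq!(filter.module_name, "output");
    assert_eq!(filter.data_sources_len, 1);
}
```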
-impl blockchain::TriggerFilter for TriggerFilter { - fn extend_with_template(&mut self, _data_source: impl Iterator) { - } - - /// This function is not safe to call multiple times; only one DataSource is supported. - fn extend<'a>( - &mut self, - mut data_sources: impl Iterator + Clone, - ) { - let Self { - modules, - module_name, - start_block, - data_sources_len, - mapping_handler, - } = self; - - if *data_sources_len >= 1 { - return; - } - - if let Some(ds) = data_sources.next() { - *data_sources_len = 1; - *modules = ds.source.package.modules.clone(); - *module_name = ds.source.module_name.clone(); - *start_block = ds.initial_block; - *mapping_handler = ds.mapping.handler.as_ref().map(|h| h.handler.clone()); - } - } - - fn node_capabilities(&self) -> EmptyNodeCapabilities { - EmptyNodeCapabilities::default() - } - - fn to_firehose_filter(self) -> Vec { - unimplemented!("this should never be called for this type") - } -} - -pub struct TriggersAdapter {} - -#[async_trait] -impl blockchain::TriggersAdapter for TriggersAdapter { - async fn ancestor_block( - &self, - _ptr: BlockPtr, - _offset: BlockNumber, - _root: Option, - ) -> Result, Error> { - unimplemented!() - } - - async fn load_block_ptrs_by_numbers( - &self, - _logger: Logger, - _block_numbers: BTreeSet, - ) -> Result, Error> { - unimplemented!() - } - - async fn chain_head_ptr(&self) -> Result, Error> { - unimplemented!() - } - - async fn scan_triggers( - &self, - _from: BlockNumber, - _to: BlockNumber, - _filter: &TriggerFilter, - ) -> Result<(Vec>, BlockNumber), Error> { - unimplemented!() - } - - async fn triggers_in_block( - &self, - _logger: &Logger, - _block: Block, - _filter: &TriggerFilter, - ) -> Result, Error> { - unimplemented!() - } - - async fn is_on_main_chain(&self, _ptr: BlockPtr) -> Result { - unimplemented!() - } - - async fn parent_ptr(&self, block: &BlockPtr) -> Result, Error> { - // This seems to work for a lot of the firehose chains. - Ok(Some(BlockPtr { - hash: BlockHash::from(vec![0xff; 32]), - number: block.number.saturating_sub(1), - })) - } -} - -pub struct TriggerProcessor { - pub locator: DeploymentLocator, -} - -impl TriggerProcessor { - pub fn new(locator: DeploymentLocator) -> Self { - Self { locator } - } -} - -#[async_trait] -impl graph::prelude::TriggerProcessor for TriggerProcessor -where - T: RuntimeHostBuilder, -{ - async fn process_trigger<'a>( - &'a self, - logger: &Logger, - _: Vec>, - block: &Arc, - mut state: BlockState, - proof_of_indexing: &SharedProofOfIndexing, - causality_region: &str, - _debug_fork: &Option>, - _subgraph_metrics: &Arc, - _instrument: bool, - ) -> Result { - for parsed_change in block.parsed_changes.clone().into_iter() { - match parsed_change { - ParsedChanges::Unset => { - // Potentially an issue with the server side or - // we are running an outdated version. In either case we should abort.
- return Err(MappingError::Unknown(anyhow!("Detected UNSET entity operation, either a server error or there's a new type of operation and we're running an outdated protobuf"))); - } - ParsedChanges::Upsert { key, entity } => { - proof_of_indexing.write_event( - &ProofOfIndexingEvent::SetEntity { - entity_type: key.entity_type.typename(), - id: &key.entity_id.to_string(), - data: &entity, - }, - causality_region, - logger, - ); - - state - .entity_cache - .set( - key, - entity, - block.number, - Some(&mut state.write_capacity_remaining), - ) - .await?; - } - ParsedChanges::Delete(entity_key) => { - let entity_type = entity_key.entity_type.cheap_clone(); - let id = entity_key.entity_id.clone(); - state.entity_cache.remove(entity_key); - - proof_of_indexing.write_event( - &ProofOfIndexingEvent::RemoveEntity { - entity_type: entity_type.typename(), - id: &id.to_string(), - }, - causality_region, - logger, - ); - } - } - } - - Ok(state) - } -} diff --git a/core/Cargo.toml b/core/Cargo.toml index 07c01a94d05..5d946ed1e6e 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -9,7 +9,6 @@ bytes = "1.0" graph = { path = "../graph" } graph-chain-ethereum = { path = "../chain/ethereum" } graph-chain-near = { path = "../chain/near" } -graph-chain-substreams = { path = "../chain/substreams" } graph-runtime-wasm = { path = "../runtime/wasm" } serde_yaml = { workspace = true } tokio = { workspace = true } diff --git a/core/src/subgraph/context/instance/mod.rs b/core/src/subgraph/context/instance/mod.rs index 0d14ae8d758..4ad491e786c 100644 --- a/core/src/subgraph/context/instance/mod.rs +++ b/core/src/subgraph/context/instance/mod.rs @@ -255,8 +255,4 @@ where pub fn hosts_len(&self) -> usize { self.onchain_hosts.len() + self.offchain_hosts.len() } - - pub fn first_host(&self) -> Option<&Arc> { - self.onchain_hosts.hosts().first() - } } diff --git a/core/src/subgraph/context/mod.rs b/core/src/subgraph/context/mod.rs index 846e0d6fefb..e6f485e2552 100644 --- a/core/src/subgraph/context/mod.rs +++ b/core/src/subgraph/context/mod.rs @@ -6,11 +6,8 @@ use crate::polling_monitor::{ use anyhow::{self, Error}; use bytes::Bytes; use graph::{ - blockchain::{BlockTime, Blockchain, TriggerFilterWrapper}, - components::{ - store::{DeploymentId, SubgraphFork}, - subgraph::{HostMetrics, MappingError, RuntimeHost as _, SharedProofOfIndexing}, - }, + blockchain::{Blockchain, TriggerFilterWrapper}, + components::{store::DeploymentId, subgraph::HostMetrics}, data::subgraph::SubgraphManifest, data_source::{ causality_region::CausalityRegionSeq, @@ -20,14 +17,13 @@ use graph::{ derive::CheapClone, ipfs::IpfsContext, prelude::{ - BlockNumber, BlockPtr, BlockState, CancelGuard, CheapClone, DeploymentHash, - MetricsRegistry, RuntimeHostBuilder, SubgraphCountMetric, SubgraphInstanceMetrics, - TriggerProcessor, + BlockNumber, CancelGuard, CheapClone, DeploymentHash, MetricsRegistry, RuntimeHostBuilder, + SubgraphCountMetric, TriggerProcessor, }, slog::Logger, }; +use std::collections::HashMap; use std::sync::{Arc, RwLock}; -use std::{collections::HashMap, time::Instant}; use tokio::sync::mpsc; use self::instance::SubgraphInstance; @@ -109,59 +105,6 @@ impl> IndexingContext { } } - pub async fn process_block( - &self, - logger: &Logger, - block_ptr: BlockPtr, - block_time: BlockTime, - block_data: Box<[u8]>, - handler: String, - mut state: BlockState, - proof_of_indexing: &SharedProofOfIndexing, - causality_region: &str, - debug_fork: &Option>, - subgraph_metrics: &Arc, - instrument: bool, - ) -> Result { - let error_count = 
state.deterministic_errors.len(); - - proof_of_indexing.start_handler(causality_region); - - let start = Instant::now(); - - // This flow is expected to have a single data source(and a corresponding host) which - // gets executed every block. - state = self - .instance - .first_host() - .expect("Expected this flow to have exactly one host") - .process_block( - logger, - block_ptr, - block_time, - block_data, - handler, - state, - proof_of_indexing.cheap_clone(), - debug_fork, - instrument, - ) - .await?; - - let elapsed = start.elapsed().as_secs_f64(); - subgraph_metrics.observe_trigger_processing_duration(elapsed); - - if state.deterministic_errors.len() != error_count { - assert!(state.deterministic_errors.len() == error_count + 1); - - // If a deterministic error has happened, write a new - // ProofOfIndexingEvent::DeterministicError to the SharedProofOfIndexing. - proof_of_indexing.write_deterministic_error(logger, causality_region); - } - - Ok(state) - } - /// Removes data sources hosts with a creation block greater or equal to `reverted_block`, so /// that they are no longer candidates for `process_trigger`. /// diff --git a/core/src/subgraph/instance_manager.rs b/core/src/subgraph/instance_manager.rs index 7706410a33b..7c8c0799660 100644 --- a/core/src/subgraph/instance_manager.rs +++ b/core/src/subgraph/instance_manager.rs @@ -122,23 +122,6 @@ where self.start_subgraph_inner(logger, loc, runner).await } - BlockchainKind::Substreams => { - let runner = instance_manager - .build_subgraph_runner::( - logger.clone(), - self.env_vars.cheap_clone(), - loc.cheap_clone(), - raw_manifest, - stop_block, - Box::new(graph_chain_substreams::TriggerProcessor::new( - loc.clone(), - )), - deployment_status_metric, - ) - .await?; - - self.start_subgraph_inner(logger, loc, runner).await - } } } }; diff --git a/core/src/subgraph/registrar.rs b/core/src/subgraph/registrar.rs index 234d43a35ae..f34df50ad88 100644 --- a/core/src/subgraph/registrar.rs +++ b/core/src/subgraph/registrar.rs @@ -336,25 +336,6 @@ where ) .await? } - BlockchainKind::Substreams => { - create_subgraph_version::( - &logger, - self.store.clone(), - self.chains.cheap_clone(), - name.clone(), - hash.cheap_clone(), - start_block_override, - graft_block_override, - raw, - node_id, - debug_fork, - self.version_switching_mode, - &resolver, - self.amp_client.cheap_clone(), - history_blocks, - ) - .await? 
- } }; debug!( diff --git a/core/src/subgraph/runner.rs b/core/src/subgraph/runner.rs index cca0e59e22b..f3b82f46b4c 100644 --- a/core/src/subgraph/runner.rs +++ b/core/src/subgraph/runner.rs @@ -47,7 +47,6 @@ const MINUTE: Duration = Duration::from_secs(60); const SKIP_PTR_UPDATES_THRESHOLD: Duration = Duration::from_secs(60 * 5); const HANDLE_REVERT_SECTION_NAME: &str = "handle_revert"; const PROCESS_BLOCK_SECTION_NAME: &str = "process_block"; -const PROCESS_WASM_BLOCK_SECTION_NAME: &str = "process_wasm_block"; const PROCESS_TRIGGERS_SECTION_NAME: &str = "process_triggers"; const HANDLE_CREATED_DS_SECTION_NAME: &str = "handle_new_data_sources"; @@ -869,37 +868,6 @@ where Ok(Arc::new(block)) } - async fn process_wasm_block( - &mut self, - proof_of_indexing: &SharedProofOfIndexing, - block_ptr: BlockPtr, - block_time: BlockTime, - block_data: Box<[u8]>, - handler: String, - causality_region: &str, - ) -> Result { - let block_state = BlockState::new( - self.inputs.store.clone(), - std::mem::take(&mut self.state.entity_lfu_cache), - ); - - self.ctx - .process_block( - &self.logger, - block_ptr, - block_time, - block_data, - handler, - block_state, - proof_of_indexing, - causality_region, - &self.inputs.debug_fork, - &self.metrics.subgraph, - self.inputs.instrument, - ) - .await - } - fn create_dynamic_data_sources( &mut self, created_data_sources: Vec, @@ -1171,20 +1139,6 @@ ) -> Result { let stopwatch = &self.metrics.stream.stopwatch; let action = match event { - Some(Ok(BlockStreamEvent::ProcessWasmBlock( - block_ptr, - block_time, - data, - handler, - cursor, - ))) => { - let _section = stopwatch.start_section(PROCESS_WASM_BLOCK_SECTION_NAME); - let res = self - .handle_process_wasm_block(block_ptr.clone(), block_time, data, handler, cursor) - .await; - let start = Instant::now(); - self.handle_action(start, block_ptr, res).await? - } Some(Ok(BlockStreamEvent::ProcessBlock(block, cursor))) => { let _section = stopwatch.start_section(PROCESS_BLOCK_SECTION_NAME); self.handle_process_block(block, cursor).await? @@ -1335,82 +1289,6 @@ where C: Blockchain, T: RuntimeHostBuilder, { - async fn handle_process_wasm_block( - &mut self, - block_ptr: BlockPtr, - block_time: BlockTime, - block_data: Box<[u8]>, - handler: String, - cursor: FirehoseCursor, - ) -> Result { - let logger = self.logger.new(o!( - "block_number" => format!("{:?}", block_ptr.number), - "block_hash" => format!("{}", block_ptr.hash) - )); - - debug!(logger, "Start processing wasm block";); - - self.metrics - .stream - .deployment_head - .set(block_ptr.number as f64); - - let proof_of_indexing = - SharedProofOfIndexing::new(block_ptr.number, self.inputs.poi_version); - - // Causality region for onchain triggers. - let causality_region = PoICausalityRegion::from_network(&self.inputs.network); - - let block_state = { - match self - .process_wasm_block( - &proof_of_indexing, - block_ptr.clone(), - block_time, - block_data, - handler, - &causality_region, - ) - .await - { - // Triggers processed with no errors or with only deterministic errors. - Ok(block_state) => block_state, - - // Some form of unknown or non-deterministic error occurred. - Err(MappingError::Unknown(e)) => return Err(ProcessingError::Unknown(e).into()), - Err(MappingError::PossibleReorg(e)) => { - info!(logger, - "Possible reorg detected, retrying"; - "error" => format!("{:#}", e), - ); - - // In case of a possible reorg, we want this function to do nothing and restart the - // block stream so it has a chance to detect the reorg.
- // - // The state is unchanged at this point, except for having cleared the entity cache. - // Losing the cache is a bit annoying but not an issue for correctness. - // - // See also b21fa73b-6453-4340-99fb-1a78ec62efb1. - return Ok(Action::Restart); - } - } - }; - - self.transact_block_state( - &logger, - block_ptr.clone(), - cursor.clone(), - block_time, - block_state, - proof_of_indexing, - vec![], - vec![], - ) - .await?; - - Ok(Action::Continue) - } - async fn handle_process_block( &mut self, block: BlockWithTriggers, diff --git a/docs/config.md b/docs/config.md index feae397e911..8641398867c 100644 --- a/docs/config.md +++ b/docs/config.md @@ -112,7 +112,7 @@ The configuration for a chain `name` is specified in the section - `shard`: where chain data is stored - `protocol`: the protocol type being indexed, default `ethereum` -(alternatively `near`, `cosmos`,`arweave`,`starknet`) + (alternatively `near`, `cosmos`,`arweave`,`starknet`) - `polling_interval`: the polling interval for the block ingestor (default 500ms) - `provider`: a list of providers for that chain @@ -123,7 +123,7 @@ A `provider` is an object with the following characteristics: `details` includes the following: -- `type`: one of `web3` (default), `firehose`, `substreams` or `web3call` +- `type`: one of `web3` (default), `firehose`, or `web3call` - `transport`: one of `rpc`, `ws`, and `ipc`. Defaults to `rpc`. - `url`: the URL for the provider - `features`: an array of features that the provider supports, either empty @@ -135,8 +135,8 @@ A `provider` is an object with the following characteristics: otherwise `graph-node` might not be able to handle all subgraphs. The tracking for this is approximate, and a small amount of deviation from this value should be expected. The deviation will be less than 10. -- `token`: bearer token, for Firehose and Substreams providers -- `key`: API key for Firehose and Substreams providers when using key-based authentication +- `token`: bearer token, for Firehose providers +- `key`: API key for Firehose providers when using key-based authentication Note that for backwards compatibility, Web3 provider `details` can be specified at the "top level" of the `provider`. 
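For reference, a chain section using only the provider types that remain after this change could look like the following; the labels and URLs are illustrative:

```toml
[chains.mainnet]
shard = "primary"
protocol = "ethereum"
provider = [
  # Firehose provider authenticated with a bearer token.
  { label = "firehose", details = { type = "firehose", url = "https://firehose.example.com", token = "..." } },
  # Plain web3 JSON-RPC provider.
  { label = "rpc", details = { type = "web3", transport = "rpc", url = "https://rpc.example.com", features = [] } },
]
```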
diff --git a/graph/Cargo.toml b/graph/Cargo.toml index 58cfba024c2..e8149e51086 100644 --- a/graph/Cargo.toml +++ b/graph/Cargo.toml @@ -37,7 +37,6 @@ hyper-util = { version = "0.1", features = ["full"] } futures01 = { package = "futures", version = "0.1.31" } lru_time_cache = "0.11" graphql-parser = "0.4.1" -humantime = "2.3.0" lazy_static = "1.5.0" num-bigint = { version = "=0.2.6", features = ["serde"] } num-integer = { version = "=0.1.46" } diff --git a/graph/build.rs b/graph/build.rs index d67e110edf4..ed4c31d077e 100644 --- a/graph/build.rs +++ b/graph/build.rs @@ -11,18 +11,4 @@ fn main() { &["proto"], ) .expect("Failed to compile Firehose proto(s)"); - - tonic_build::configure() - .protoc_arg("--experimental_allow_proto3_optional") - .out_dir("src/substreams") - .compile_protos(&["proto/substreams.proto"], &["proto"]) - .expect("Failed to compile Substreams proto(s)"); - - tonic_build::configure() - .protoc_arg("--experimental_allow_proto3_optional") - .extern_path(".sf.substreams.v1", "crate::substreams") - .extern_path(".sf.firehose.v2", "crate::firehose") - .out_dir("src/substreams_rpc") - .compile_protos(&["proto/substreams-rpc.proto"], &["proto"]) - .expect("Failed to compile Substreams RPC proto(s)"); } diff --git a/graph/proto/substreams-rpc.proto b/graph/proto/substreams-rpc.proto deleted file mode 100644 index 28298458480..00000000000 --- a/graph/proto/substreams-rpc.proto +++ /dev/null @@ -1,253 +0,0 @@ -syntax = "proto3"; - -package sf.substreams.rpc.v2; - -import "google/protobuf/any.proto"; - -import "substreams.proto"; -import "firehose.proto"; - -service EndpointInfo { - rpc Info(sf.firehose.v2.InfoRequest) returns (sf.firehose.v2.InfoResponse); } - -service Stream { rpc Blocks(Request) returns (stream Response); } - -message Request { - int64 start_block_num = 1; - string start_cursor = 2; - uint64 stop_block_num = 3; - - // With final_blocks_only, you only receive blocks that are irreversible: - // 'final_block_height' will be equal to current block and no 'undo_signal' - // will ever be sent - bool final_blocks_only = 4; - - // Substreams has two modes when executing your module(s): either development - // mode or production mode. Development and production modes impact the - // execution of Substreams, important aspects of execution include: - // * The time required to reach the first byte. - // * The speed that large ranges get executed. - // * The module logs and outputs sent back to the client. - // - // By default, the engine runs in developer mode, with richer and deeper - // output. Differences between production and development modes include: - // * Forward parallel execution is enabled in production mode and disabled in - // development mode - // * The time required to reach the first byte in development mode is faster - // than in production mode. - // - // Specific attributes of development mode include: - // * The client will receive all of the executed module's logs. - // * It's possible to request specific store snapshots in the execution tree - // (via `debug_initial_store_snapshot_for_modules`). - // * Multiple modules' output is possible. - // - // With production mode, however, you trade off functionality for high speed, - // enabling forward parallel execution of modules ahead of time.
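graph-node always streamed in production mode; the removed `stream_blocks` near the end of this diff sets `production_mode: true` on every request. A sketch of that construction, with a local stand-in for the prost-generated `Request` type:

```rust
// Local stand-in for the prost-generated sf.substreams.rpc.v2.Request.
#[derive(Default, Debug)]
struct Request {
    start_block_num: i64,
    start_cursor: String,
    stop_block_num: u64,
    final_blocks_only: bool,
    production_mode: bool,
    output_module: String,
}

fn main() {
    // Mirrors the request built by the removed stream_blocks(); the module
    // name is illustrative. A stop_block_num of 0 follows the removed code's
    // default when the manifest has no end block.
    let req = Request {
        start_block_num: 17_000_000,
        start_cursor: String::new(),
        stop_block_num: 0,
        production_mode: true,
        output_module: "graph_out".to_string(),
        ..Default::default()
    };
    println!("{req:?}");
}
```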
- bool production_mode = 5; - - string output_module = 6; - - sf.substreams.v1.Modules modules = 7; - - // Available only in developer mode - repeated string debug_initial_store_snapshot_for_modules = 10; -} - -message Response { - oneof message { - SessionInit session = 1; // Always sent first - ModulesProgress progress = 2; // Progress of data preparation, before - // sending in the stream of `data` events. - BlockScopedData block_scoped_data = 3; - BlockUndoSignal block_undo_signal = 4; - Error fatal_error = 5; - - // Available only in developer mode, and only if - // `debug_initial_store_snapshot_for_modules` is set. - InitialSnapshotData debug_snapshot_data = 10; - // Available only in developer mode, and only if - // `debug_initial_store_snapshot_for_modules` is set. - InitialSnapshotComplete debug_snapshot_complete = 11; - } -} - -// BlockUndoSignal informs you that every bit of data -// with a block number above 'last_valid_block' has been reverted -// on-chain. Delete that data and restart from 'last_valid_cursor' -message BlockUndoSignal { - sf.substreams.v1.BlockRef last_valid_block = 1; - string last_valid_cursor = 2; -} - -message BlockScopedData { - MapModuleOutput output = 1; - sf.substreams.v1.Clock clock = 2; - string cursor = 3; - - // Non-deterministic, allows substreams-sink to let go of their undo data. - uint64 final_block_height = 4; - - repeated MapModuleOutput debug_map_outputs = 10; - repeated StoreModuleOutput debug_store_outputs = 11; -} - -message SessionInit { - string trace_id = 1; - uint64 resolved_start_block = 2; - uint64 linear_handoff_block = 3; - uint64 max_parallel_workers = 4; -} - -message InitialSnapshotComplete { string cursor = 1; } - -message InitialSnapshotData { - string module_name = 1; - repeated StoreDelta deltas = 2; - uint64 sent_keys = 4; - uint64 total_keys = 3; -} - -message MapModuleOutput { - string name = 1; - google.protobuf.Any map_output = 2; - // DebugOutputInfo is available in non-production mode only - OutputDebugInfo debug_info = 10; -} - -// StoreModuleOutput are produced for store modules in development mode. -// It is not possible to retrieve store models in production, with -// parallelization enabled. If you need the deltas directly, write a pass -// through mapper module that will get them down to you. -message StoreModuleOutput { - string name = 1; - repeated StoreDelta debug_store_deltas = 2; - OutputDebugInfo debug_info = 10; -} - -message OutputDebugInfo { - repeated string logs = 1; - // LogsTruncated is a flag that tells you if you received all the logs or if - // they were truncated because you logged too much (fixed limit currently is - // set to 128 KiB). - bool logs_truncated = 2; - bool cached = 3; -} - -// ModulesProgress is a message that is sent every 500ms -message ModulesProgress { - // previously: repeated ModuleProgress modules = 1; - // these previous `modules` messages were sent in bursts and are not sent - // anymore. 
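graph-node throttled these progress messages client-side, logging at most once every 30 seconds; see the removed `to_block_stream_event` later in this diff. The shape of that gate, extracted into a self-contained sketch:

```rust
use std::time::{Duration, Instant};

struct ProgressLog {
    last_progress: Instant,
}

impl ProgressLog {
    // Returns true at most once per interval, mirroring the 30 s check
    // around progress logging in the removed to_block_stream_event.
    fn should_log(&mut self, interval: Duration) -> bool {
        if self.last_progress.elapsed() > interval {
            self.last_progress = Instant::now();
            return true;
        }
        false
    }
}

fn main() {
    let mut log = ProgressLog { last_progress: Instant::now() };
    // Right after a log, further progress messages are swallowed.
    assert!(!log.should_log(Duration::from_secs(30)));
    // With a zero interval, the next message goes through again.
    assert!(log.should_log(Duration::ZERO));
}
```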
- reserved 1; - // List of jobs running on tier2 servers - repeated Job running_jobs = 2; - // Execution statistics for each module - repeated ModuleStats modules_stats = 3; - // Stages definition and completed block ranges - repeated Stage stages = 4; - - ProcessedBytes processed_bytes = 5; -} - -message ProcessedBytes { - uint64 total_bytes_read = 1; - uint64 total_bytes_written = 2; -} - -message Error { - string module = 1; - string reason = 2; - repeated string logs = 3; - // FailureLogsTruncated is a flag that tells you if you received all the logs - // or if they were truncated because you logged too much (fixed limit - // currently is set to 128 KiB). - bool logs_truncated = 4; -} - -message Job { - uint32 stage = 1; - uint64 start_block = 2; - uint64 stop_block = 3; - uint64 processed_blocks = 4; - uint64 duration_ms = 5; -} - -message Stage { - repeated string modules = 1; - repeated BlockRange completed_ranges = 2; } - -// ModuleStats gathers metrics and statistics from each module, running on tier1 -// or tier2. All the 'count' and 'time_ms' values may include duplicates for each -// stage going over that module -message ModuleStats { - // name of the module - string name = 1; - - // total_processed_blocks is the sum of blocks sent to that module code - uint64 total_processed_block_count = 2; - // total_processing_time_ms is the sum of all time spent running that module - // code - uint64 total_processing_time_ms = 3; - - // external_calls are chain-specific intrinsics, like "Ethereum RPC calls". - repeated ExternalCallMetric external_call_metrics = 4; - - // total_store_operation_time_ms is the sum of all time spent running that - // module code waiting for a store operation (ex: read, write, delete...) - uint64 total_store_operation_time_ms = 5; - // total_store_read_count is the sum of all the store Read operations called - // from that module code - uint64 total_store_read_count = 6; - - // total_store_write_count is the sum of all store Write operations called - // from that module code (store-only) - uint64 total_store_write_count = 10; - - // total_store_deleteprefix_count is the sum of all store DeletePrefix - // operations called from that module code (store-only) note that DeletePrefix - // can be a costly operation on large stores - uint64 total_store_deleteprefix_count = 11; - - // store_size_bytes is the uncompressed size of the full KV store for that - // module, from the last 'merge' operation (store-only) - uint64 store_size_bytes = 12; - - // total_store_merging_time_ms is the time spent merging partial stores into a - // full KV store for that module (store-only) - uint64 total_store_merging_time_ms = 13; - - // store_currently_merging is true if there is a merging operation (partial - // store to full KV store) on the way.
- bool store_currently_merging = 14; - - // highest_contiguous_block is the highest block in the highest merged full KV - // store of that module (store-only) - uint64 highest_contiguous_block = 15; -} - -message ExternalCallMetric { - string name = 1; - uint64 count = 2; - uint64 time_ms = 3; -} - -message StoreDelta { - enum Operation { - UNSET = 0; - CREATE = 1; - UPDATE = 2; - DELETE = 3; - } - Operation operation = 1; - uint64 ordinal = 2; - string key = 3; - bytes old_value = 4; - bytes new_value = 5; -} - -message BlockRange { - uint64 start_block = 2; - uint64 end_block = 3; -} diff --git a/graph/proto/substreams.proto b/graph/proto/substreams.proto deleted file mode 100644 index 16db52419aa..00000000000 --- a/graph/proto/substreams.proto +++ /dev/null @@ -1,163 +0,0 @@ -syntax = "proto3"; - -package sf.substreams.v1; - -import "google/protobuf/timestamp.proto"; -import "google/protobuf/descriptor.proto"; -import "google/protobuf/any.proto"; - -message Package { - // Needs to be one so this file can be used _directly_ as a - // buf `Image` and/or a ProtoSet for grpcurl and other tools - repeated google.protobuf.FileDescriptorProto proto_files = 1; - reserved 2 to 4; // Reserved for future: in case protosets adds fields - - uint64 version = 5; - sf.substreams.v1.Modules modules = 6; - repeated ModuleMetadata module_meta = 7; - repeated PackageMetadata package_meta = 8; - - // Source network for Substreams to fetch its data from. - string network = 9; - - google.protobuf.Any sink_config = 10; - string sink_module = 11; -} - -message PackageMetadata { - string version = 1; - string url = 2; - string name = 3; - string doc = 4; -} - -message ModuleMetadata { - // Corresponds to the index in `Package.metadata.package_meta` - uint64 package_index = 1; - string doc = 2; -} - -message Modules { - repeated Module modules = 1; - repeated Binary binaries = 2; -} - -// Binary represents some code compiled to its binary form. -message Binary { - string type = 1; - bytes content = 2; -} - -message Module { - string name = 1; - oneof kind { - KindMap kind_map = 2; - KindStore kind_store = 3; - KindBlockIndex kind_block_index = 10; - }; - - uint32 binary_index = 4; - string binary_entrypoint = 5; - - repeated Input inputs = 6; - Output output = 7; - - uint64 initial_block = 8; - - BlockFilter block_filter = 9; - - message BlockFilter { - string module = 1; - oneof query { - string query_string = 2; - QueryFromParams query_from_params = 3; - }; - } - - message QueryFromParams {} - - message KindMap { - string output_type = 1; - } - - message KindStore { - // The `update_policy` determines the functions available to mutate the store - // (like `set()`, `set_if_not_exists()` or `sum()`, etc..) in - // order to ensure that parallel operations are possible and deterministic - // - // Say a store accumulates keys from block 0 to 1M, and a second store - // accumulates keys from block 1M to 2M. When we want to use this - // store as a dependency for a downstream module, we will merge the - // two stores according to this policy. - UpdatePolicy update_policy = 1; - string value_type = 2; - - enum UpdatePolicy { - UPDATE_POLICY_UNSET = 0; - // Provides a store where you can `set()` keys, and the latest key wins - UPDATE_POLICY_SET = 1; - // Provides a store where you can `set_if_not_exists()` keys, and the first key wins - UPDATE_POLICY_SET_IF_NOT_EXISTS = 2; - // Provides a store where you can `add_*()` keys, where two stores merge by summing their values.
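These update policies exist so that partial stores built over disjoint block ranges can be merged deterministically. A toy illustration of the add/min/max merge semantics over two key-value maps; this is not graph-node code, just the idea:

```rust
use std::collections::HashMap;

#[derive(Clone, Copy)]
enum UpdatePolicy {
    Add,
    Min,
    Max,
}

// Merge a later partial store into an earlier one under the given policy,
// mimicking how substreams combines stores from disjoint block ranges.
fn merge(policy: UpdatePolicy, earlier: &mut HashMap<String, i64>, later: HashMap<String, i64>) {
    for (key, value) in later {
        earlier
            .entry(key)
            .and_modify(|existing| {
                *existing = match policy {
                    UpdatePolicy::Add => *existing + value,
                    UpdatePolicy::Min => (*existing).min(value),
                    UpdatePolicy::Max => (*existing).max(value),
                }
            })
            .or_insert(value);
    }
}

fn main() {
    let mut store = HashMap::from([("transfers".to_string(), 10)]);
    merge(UpdatePolicy::Add, &mut store, HashMap::from([("transfers".to_string(), 5)]));
    assert_eq!(store["transfers"], 15);
}
```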
- UPDATE_POLICY_ADD = 3; - // Provides a store where you can `min_*()` keys, where two stores merge by leaving the minimum value. - UPDATE_POLICY_MIN = 4; - // Provides a store where you can `max_*()` keys, where two stores merge by leaving the maximum value. - UPDATE_POLICY_MAX = 5; - // Provides a store where you can `append()` keys, where two stores merge by concatenating the bytes in order. - UPDATE_POLICY_APPEND = 6; - // Provides a store with both `set()` and `sum()` functions. - UPDATE_POLICY_SET_SUM = 7; - } - } - - message KindBlockIndex { - string output_type = 1; - } - - message Input { - oneof input { - Source source = 1; - Map map = 2; - Store store = 3; - Params params = 4; - } - - message Source { - string type = 1; // ex: "sf.ethereum.type.v1.Block" - } - message Map { - string module_name = 1; // ex: "block_to_pairs" - } - message Store { - string module_name = 1; - Mode mode = 2; - - enum Mode { - UNSET = 0; - GET = 1; - DELTAS = 2; - } - } - message Params { - string value = 1; - } - } - - message Output { - string type = 1; - } -} - -// Clock is a pointer to a block with added timestamp -message Clock { - string id = 1; - uint64 number = 2; - google.protobuf.Timestamp timestamp = 3; -} - -// BlockRef is a pointer to a block to which we don't know the timestamp -message BlockRef { - string id = 1; - uint64 number = 2; -} diff --git a/graph/src/blockchain/block_stream.rs b/graph/src/blockchain/block_stream.rs index e3568345803..7d87bba9398 100644 --- a/graph/src/blockchain/block_stream.rs +++ b/graph/src/blockchain/block_stream.rs @@ -1,34 +1,26 @@ use crate::blockchain::SubgraphFilter; use crate::data_source::{subgraph, CausalityRegion}; -use crate::substreams::Clock; -use crate::substreams_rpc::response::Message as SubstreamsMessage; -use crate::substreams_rpc::BlockScopedData; use anyhow::Error; use async_stream::stream; use async_trait::async_trait; use futures03::Stream; -use prost_types::Any; use std::collections::{BTreeMap, BTreeSet, HashMap}; use std::fmt; use std::sync::Arc; -use std::time::Instant; use thiserror::Error; use tokio::sync::mpsc::{self, Receiver, Sender}; -use super::substreams_block_stream::SubstreamsLogData; -use super::{Block, BlockPtr, BlockTime, Blockchain, Trigger, TriggerFilterWrapper}; +use super::{Block, BlockPtr, Blockchain, Trigger, TriggerFilterWrapper}; use crate::anyhow::Result; use crate::components::store::{BlockNumber, DeploymentLocator, SourceableStore}; use crate::data::subgraph::UnifiedMappingApiVersion; use crate::firehose::{self, FirehoseEndpoint}; use crate::futures03::stream::StreamExt as _; use crate::schema::{EntityType, InputSchema}; -use crate::substreams_rpc::response::Message; use crate::{prelude::*, prometheus::labels}; pub const BUFFERED_BLOCK_STREAM_SIZE: usize = 100; pub const FIREHOSE_BUFFER_STREAM_SIZE: usize = 1; -pub const SUBSTREAMS_BUFFER_STREAM_SIZE: usize = 100; pub struct BufferedBlockStream { inner: Pin, BlockStreamError>> + Send>>, @@ -133,16 +125,6 @@ pub trait BlockStreamBuilder: Send + Sync { unified_api_version: UnifiedMappingApiVersion, ) -> Result>>; - async fn build_substreams( - &self, - chain: &C, - schema: InputSchema, - deployment: DeploymentLocator, - block_cursor: FirehoseCursor, - subgraph_current_block: Option, - filter: Arc, - ) -> Result>>; - async fn build_polling( &self, chain: &C, @@ -695,102 +677,6 @@ pub trait BlockStreamMapper: Send + Sync { logger: &Logger, block: C::Block, ) -> Result, BlockStreamError>; - - async fn handle_substreams_block( - &self, - logger: &Logger, - clock: 
Clock, - cursor: FirehoseCursor, - block: Vec, - ) -> Result, BlockStreamError>; - - async fn to_block_stream_event( - &self, - logger: &mut Logger, - message: Option, - log_data: &mut SubstreamsLogData, - ) -> Result>, BlockStreamError> { - match message { - Some(SubstreamsMessage::Session(session_init)) => { - info!( - &logger, - "Received session init"; - "session" => format!("{:?}", session_init), - ); - log_data.trace_id = session_init.trace_id; - return Ok(None); - } - Some(SubstreamsMessage::BlockUndoSignal(undo)) => { - let valid_block = match undo.last_valid_block { - Some(clock) => clock, - None => return Err(BlockStreamError::from(SubstreamsError::InvalidUndoError)), - }; - let valid_ptr = BlockPtr { - hash: valid_block.id.trim_start_matches("0x").try_into()?, - number: valid_block.number as i32, - }; - log_data.last_seen_block = valid_block.number; - return Ok(Some(BlockStreamEvent::Revert( - valid_ptr, - FirehoseCursor::from(undo.last_valid_cursor.clone()), - ))); - } - - Some(SubstreamsMessage::BlockScopedData(block_scoped_data)) => { - let BlockScopedData { - output, - clock, - cursor, - final_block_height: _, - debug_map_outputs: _, - debug_store_outputs: _, - } = block_scoped_data; - - let module_output = match output { - Some(out) => out, - None => return Ok(None), - }; - - let clock = match clock { - Some(clock) => clock, - None => return Err(BlockStreamError::from(SubstreamsError::MissingClockError)), - }; - - let value = match module_output.map_output { - Some(Any { type_url: _, value }) => value, - None => return Ok(None), - }; - - log_data.last_seen_block = clock.number; - let cursor = FirehoseCursor::from(cursor); - - let event = self - .handle_substreams_block(&logger, clock, cursor, value) - .await?; - - Ok(Some(event)) - } - - Some(SubstreamsMessage::Progress(progress)) => { - if log_data.last_progress.elapsed() > Duration::from_secs(30) { - info!(&logger, "{}", log_data.info_string(&progress); "trace_id" => &log_data.trace_id); - debug!(&logger, "{}", log_data.debug_string(&progress); "trace_id" => &log_data.trace_id); - trace!( - &logger, - "Received progress update"; - "progress" => format!("{:?}", progress), - "trace_id" => &log_data.trace_id, - ); - log_data.last_progress = Instant::now(); - } - Ok(None) - } - - // ignoring Progress messages and SessionInit - // We are only interested in Data and Undo signals - _ => Ok(None), - } - } } #[derive(Error, Debug)] @@ -813,58 +699,10 @@ impl From for FirehoseError { } } -#[derive(Error, Debug)] -pub enum SubstreamsError { - #[error("response is missing the clock information")] - MissingClockError, - - #[error("invalid undo message")] - InvalidUndoError, - - #[error("entity validation failed {0}")] - EntityValidationError(#[from] crate::data::store::EntityValidationError), - - /// We were unable to decode the received block payload into the chain specific Block struct (e.g. 
chain_ethereum::pb::Block) - #[error("received gRPC block payload cannot be decoded: {0}")] - DecodingError(#[from] prost::DecodeError), - - /// Some unknown error occurred - #[error("unknown error {0}")] - UnknownError(#[from] anyhow::Error), - - #[error("multiple module output error")] - MultipleModuleOutputError, - - #[error("module output was not available (none) or wrong data provided")] - ModuleOutputNotPresentOrUnexpected, - - #[error("unexpected store delta output")] - UnexpectedStoreDeltaOutput, -} - -impl SubstreamsError { - pub fn is_deterministic(&self) -> bool { - use SubstreamsError::*; - - match self { - EntityValidationError(_) => true, - MissingClockError - | InvalidUndoError - | DecodingError(_) - | UnknownError(_) - | MultipleModuleOutputError - | ModuleOutputNotPresentOrUnexpected - | UnexpectedStoreDeltaOutput => false, - } - } -} - #[derive(Debug, Error)] pub enum BlockStreamError { #[error("Failed to decode protobuf {0}")] ProtobufDecodingError(#[from] prost::DecodeError), - #[error("substreams error: {0}")] - SubstreamsError(#[from] SubstreamsError), #[error("block stream error {0}")] Unknown(#[from] anyhow::Error), #[error("block stream fatal error {0}")] @@ -883,7 +721,6 @@ pub enum BlockStreamEvent { Revert(BlockPtr, FirehoseCursor), ProcessBlock(BlockWithTriggers, FirehoseCursor), - ProcessWasmBlock(BlockPtr, BlockTime, Box<[u8]>, String, FirehoseCursor), } #[derive(Clone)] diff --git a/graph/src/blockchain/builder.rs b/graph/src/blockchain/builder.rs deleted file mode 100644 index 943586770c5..00000000000 --- a/graph/src/blockchain/builder.rs +++ /dev/null @@ -1,30 +0,0 @@ -use tonic::async_trait; - -use super::Blockchain; -use crate::{ - components::store::ChainHeadStore, - data::value::Word, - env::EnvVars, - firehose::FirehoseEndpoints, - prelude::{LoggerFactory, MetricsRegistry}, -}; -use std::sync::Arc; - -/// An implementor of [`BlockchainBuilder`] for chains that don't require -/// particularly fancy builder logic. -pub struct BasicBlockchainBuilder { - pub logger_factory: LoggerFactory, - pub name: Word, - pub chain_head_store: Arc, - pub firehose_endpoints: FirehoseEndpoints, - pub metrics_registry: Arc, -} - -/// Something that can build a [`Blockchain`]. -#[async_trait] -pub trait BlockchainBuilder -where - C: Blockchain, -{ - async fn build(self, config: &Arc) -> C; -} diff --git a/graph/src/blockchain/mod.rs b/graph/src/blockchain/mod.rs index 5066f38ac54..f8358a209c1 100644 --- a/graph/src/blockchain/mod.rs +++ b/graph/src/blockchain/mod.rs @@ -3,14 +3,12 @@ //! trait which is the centerpiece of this module. 
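The removed `is_deterministic` drew the line between errors that fail a deployment permanently and errors that are retried: only entity validation counted as deterministic. A compact restatement of that classification over a simplified enum:

```rust
#[derive(Debug)]
enum SubstreamsError {
    EntityValidation(String),
    MissingClock,
    Decoding(String),
    Unknown(String),
}

impl SubstreamsError {
    // Deterministic errors fail the deployment permanently; everything
    // else is treated as transient and the stream reconnects.
    fn is_deterministic(&self) -> bool {
        matches!(self, SubstreamsError::EntityValidation(_))
    }
}

fn main() {
    assert!(SubstreamsError::EntityValidation("bad id".into()).is_deterministic());
    assert!(!SubstreamsError::Decoding("truncated payload".into()).is_deterministic());
    assert!(!SubstreamsError::MissingClock.is_deterministic());
    assert!(!SubstreamsError::Unknown("network".into()).is_deterministic());
}
```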
pub mod block_stream; -mod builder; pub mod client; mod empty_node_capabilities; pub mod firehose_block_ingestor; pub mod firehose_block_stream; pub mod mock; mod noop_runtime_adapter; -pub mod substreams_block_stream; mod types; // Try to reexport most of the necessary types @@ -50,7 +48,6 @@ use std::{ use web3::types::H256; pub use block_stream::{ChainHeadUpdateListener, ChainHeadUpdateStream, TriggersAdapter}; -pub use builder::{BasicBlockchainBuilder, BlockchainBuilder}; pub use empty_node_capabilities::EmptyNodeCapabilities; pub use noop_runtime_adapter::NoopRuntimeAdapter; pub use types::{BlockHash, BlockPtr, BlockTime, ChainIdentifier, ExtendedBlockPtr}; @@ -571,8 +568,6 @@ pub enum BlockchainKind { /// NEAR chains (Mainnet, Testnet) or chains that are compatible Near, - - Substreams, } impl fmt::Display for BlockchainKind { @@ -580,7 +575,6 @@ impl fmt::Display for BlockchainKind { let value = match self { BlockchainKind::Ethereum => "ethereum", BlockchainKind::Near => "near", - BlockchainKind::Substreams => "substreams", }; write!(f, "{}", value) } @@ -593,7 +587,6 @@ impl FromStr for BlockchainKind { match s { "ethereum" => Ok(BlockchainKind::Ethereum), "near" => Ok(BlockchainKind::Near), - "substreams" => Ok(BlockchainKind::Substreams), "subgraph" => Ok(BlockchainKind::Ethereum), // TODO(krishna): We should detect the blockchain kind from the source subgraph "amp" => Ok(BlockchainKind::Ethereum), // TODO: Maybe get this from the Amp server _ => Err(anyhow!("unknown blockchain kind {}", s)), diff --git a/graph/src/blockchain/substreams_block_stream.rs b/graph/src/blockchain/substreams_block_stream.rs deleted file mode 100644 index c359ec1a504..00000000000 --- a/graph/src/blockchain/substreams_block_stream.rs +++ /dev/null @@ -1,433 +0,0 @@ -use super::block_stream::{ - BlockStreamError, BlockStreamMapper, FirehoseCursor, SUBSTREAMS_BUFFER_STREAM_SIZE, -}; -use super::client::ChainClient; -use crate::blockchain::block_stream::{BlockStream, BlockStreamEvent}; -use crate::blockchain::Blockchain; -use crate::firehose::ConnectionHeaders; -use crate::prelude::*; -use crate::substreams::Modules; -use crate::substreams_rpc::{ModulesProgress, Request, Response}; -use crate::util::backoff::ExponentialBackoff; -use async_stream::try_stream; -use futures03::{Stream, StreamExt}; -use humantime::format_duration; -use std::sync::Arc; -use std::task::{Context, Poll}; -use std::time::{Duration, Instant}; -use tonic::{Code, Status}; - -struct SubstreamsBlockStreamMetrics { - deployment: DeploymentHash, - restarts: CounterVec, - connect_duration: GaugeVec, - time_between_responses: HistogramVec, - responses: CounterVec, -} - -impl SubstreamsBlockStreamMetrics { - pub fn new(registry: Arc, deployment: DeploymentHash) -> Self { - Self { - deployment, - restarts: registry - .global_counter_vec( - "deployment_substreams_blockstream_restarts", - "Counts the number of times a Substreams block stream is (re)started", - vec!["deployment", "provider", "success"].as_slice(), - ) - .unwrap(), - - connect_duration: registry - .global_gauge_vec( - "deployment_substreams_blockstream_connect_duration", - "Measures the time it takes to connect a Substreams block stream", - vec!["deployment", "provider"].as_slice(), - ) - .unwrap(), - - time_between_responses: registry - .global_histogram_vec( - "deployment_substreams_blockstream_time_between_responses", - "Measures the time between receiving and processing Substreams stream responses", - vec!["deployment", "provider"].as_slice(), - ) - .unwrap(), - - responses: 
registry - .global_counter_vec( - "deployment_substreams_blockstream_responses", - "Counts the number of responses received from a Substreams block stream", - vec!["deployment", "provider", "kind"].as_slice(), - ) - .unwrap(), - } - } - - fn observe_successful_connection(&self, time: &mut Instant, provider: &str) { - self.restarts - .with_label_values(&[self.deployment.as_str(), &provider, "true"]) - .inc(); - self.connect_duration - .with_label_values(&[self.deployment.as_str(), &provider]) - .set(time.elapsed().as_secs_f64()); - - // Reset last connection timestamp - *time = Instant::now(); - } - - fn observe_failed_connection(&self, time: &mut Instant, provider: &str) { - self.restarts - .with_label_values(&[self.deployment.as_str(), &provider, "false"]) - .inc(); - self.connect_duration - .with_label_values(&[self.deployment.as_str(), &provider]) - .set(time.elapsed().as_secs_f64()); - - // Reset last connection timestamp - *time = Instant::now(); - } - - fn observe_response(&self, kind: &str, time: &mut Instant, provider: &str) { - self.time_between_responses - .with_label_values(&[self.deployment.as_str(), &provider]) - .observe(time.elapsed().as_secs_f64()); - self.responses - .with_label_values(&[self.deployment.as_str(), &provider, kind]) - .inc(); - - // Reset last response timestamp - *time = Instant::now(); - } -} - -pub struct SubstreamsBlockStream<C: Blockchain> { - // FIXME: not sure if it is ok for this to be public; maybe - // we do not want to expose the stream to the caller - stream: Pin<Box<dyn Stream<Item = Result<BlockStreamEvent<C>, BlockStreamError>> + Send>>, -} - -impl<C> SubstreamsBlockStream<C> -where - C: Blockchain, -{ - pub fn new<F>( - deployment: DeploymentHash, - client: Arc<ChainClient<C>>, - subgraph_current_block: Option<BlockPtr>, - cursor: FirehoseCursor, - mapper: Arc<F>, - modules: Modules, - module_name: String, - start_blocks: Vec<BlockNumber>, - end_blocks: Vec<BlockNumber>, - logger: Logger, - registry: Arc<MetricsRegistry>, - ) -> Self - where - F: BlockStreamMapper<C> + 'static, - { - let manifest_start_block_num = start_blocks.into_iter().min().unwrap_or(0); - - let manifest_end_block_num = end_blocks.into_iter().min().unwrap_or(0); - - let metrics = SubstreamsBlockStreamMetrics::new(registry, deployment.clone()); - - SubstreamsBlockStream { - stream: Box::pin(stream_blocks( - client, - cursor, - deployment, - mapper, - modules, - module_name, - manifest_start_block_num, - manifest_end_block_num, - subgraph_current_block, - logger, - metrics, - )), - } - } -} - -fn stream_blocks<C: Blockchain, F: BlockStreamMapper<C>>( - client: Arc<ChainClient<C>>, - cursor: FirehoseCursor, - deployment: DeploymentHash, - mapper: Arc<F>, - modules: Modules, - module_name: String, - manifest_start_block_num: BlockNumber, - manifest_end_block_num: BlockNumber, - subgraph_current_block: Option<BlockPtr>, - logger: Logger, - metrics: SubstreamsBlockStreamMetrics, -) -> impl Stream<Item = Result<BlockStreamEvent<C>, BlockStreamError>> { - let mut latest_cursor = cursor.to_string(); - - let start_block_num = subgraph_current_block - .as_ref() - .map(|ptr| { - // current_block has already been processed, so we start at the next block - ptr.block_number() as i64 + 1 - }) - .unwrap_or(manifest_start_block_num as i64); - - let stop_block_num = manifest_end_block_num as u64; - - let headers = ConnectionHeaders::new().with_deployment(deployment.clone()); - - // Back off exponentially whenever we encounter a connection error or a stream with bad data - let mut backoff = ExponentialBackoff::new(Duration::from_millis(500), Duration::from_secs(45)); - - let mut log_data = SubstreamsLogData::new(); - - try_stream!
{ - // This attribute is needed because `try_stream!` seems to break detection of `skip_backoff` assignments - #[allow(unused_assignments)] - let mut skip_backoff = false; - - if !modules.modules.iter().any(|m| module_name.eq(&m.name)) { - Err(BlockStreamError::Fatal(format!( - "module `{}` not found", - module_name - )))?; - } - - let endpoint = client.firehose_endpoint().await?; - let mut logger = logger.new(o!("deployment" => deployment.clone(), "provider" => endpoint.provider.to_string())); - - loop { - // We just reconnected, assume that we want to back off on errors - skip_backoff = false; - - let mut connect_start = Instant::now(); - let request = Request { - start_block_num, - start_cursor: latest_cursor.to_string(), - stop_block_num, - modules: Some(modules.clone()), - output_module: module_name.clone(), - production_mode: true, - ..Default::default() - }; - - - let result = endpoint.clone().substreams(request, &headers).await; - - match result { - Ok(stream) => { - info!(&logger, "Blockstreams connected"); - - // Track the time it takes to set up the block stream - metrics.observe_successful_connection(&mut connect_start, &endpoint.provider); - - let mut last_response_time = Instant::now(); - let mut expected_stream_end = false; - - for await response in stream{ - match process_substreams_response( - response, - mapper.as_ref(), - &mut logger, - &mut log_data, - ).await { - Ok(block_response) => { - match block_response { - None => {} - Some(BlockResponse::Proceed(event, cursor)) => { - // Reset backoff because we got a good value from the stream - backoff.reset(); - - metrics.observe_response("proceed", &mut last_response_time, &endpoint.provider); - - yield event; - - latest_cursor = cursor; - } - } - }, - Err(BlockStreamError::SubstreamsError(e)) if e.is_deterministic() => - Err(BlockStreamError::Fatal(e.to_string()))?, - - Err(BlockStreamError::Fatal(msg)) => - Err(BlockStreamError::Fatal(msg))?, - - Err(err) => { - - info!(&logger, "received err"); - // We have an open connection but there was an error processing the Firehose - // response. We will reconnect the stream after this; this is the case where - // we actually _want_ to back off in case we keep running into the same error. - // An example of this situation is if we get invalid block or transaction data - // that cannot be decoded properly. - - metrics.observe_response("error", &mut last_response_time, &endpoint.provider); - - error!(logger, "{:#}", err); - expected_stream_end = true; - break; - } - } - } - - if !expected_stream_end { - error!(logger, "Stream blocks complete unexpectedly, expecting stream to always stream blocks"); - } - }, - Err(e) => { - // We failed to connect and will try again; this is another - // case where we actually _want_ to back off in case we keep - // having connection errors. 
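// Editor's note: the loop above is the standard backoff/reconnect pattern. A
// condensed sketch of it, using the same ExponentialBackoff API from
// graph::util::backoff that the deleted code imports (`connect()` and
// `consume()` stand in for the Substreams call and are hypothetical):
//
//     let mut backoff =
//         ExponentialBackoff::new(Duration::from_millis(500), Duration::from_secs(45));
//     loop {
//         match connect().await {
//             Ok(stream) => {
//                 backoff.reset();       // good data resets the delay
//                 consume(stream).await; // runs until the stream errors out
//             }
//             Err(_) => {}
//         }
//         backoff.sleep_async().await;   // 500ms, 1s, 2s, ... capped at 45s
//     }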
- - metrics.observe_failed_connection(&mut connect_start, &endpoint.provider); - - error!(logger, "Unable to connect to endpoint: {:#}", e); - } - } - - // If we reach this point, we must wait a bit before retrying, unless `skip_backoff` is true - if !skip_backoff { - backoff.sleep_async().await; - } - } - } -} - -enum BlockResponse<C: Blockchain> { - Proceed(BlockStreamEvent<C>, String), -} - -async fn process_substreams_response<C: Blockchain, F: BlockStreamMapper<C>>( - result: Result<Response, Status>, - mapper: &F, - logger: &mut Logger, - log_data: &mut SubstreamsLogData, -) -> Result<Option<BlockResponse<C>>, BlockStreamError> { - let response = match result { - Ok(v) => v, - Err(e) => { - if e.code() == Code::InvalidArgument { - return Err(BlockStreamError::Fatal(e.message().to_string())); - } - - return Err(BlockStreamError::from(anyhow!( - "An error occurred while streaming blocks: {:#}", - e - ))); - } - }; - - match mapper - .to_block_stream_event(logger, response.message, log_data) - .await - .map_err(BlockStreamError::from)? - { - Some(event) => { - let cursor = match &event { - BlockStreamEvent::Revert(_, cursor) => cursor, - BlockStreamEvent::ProcessBlock(_, cursor) => cursor, - BlockStreamEvent::ProcessWasmBlock(_, _, _, _, cursor) => cursor, - } - .to_string(); - - return Ok(Some(BlockResponse::Proceed(event, cursor))); - } - None => Ok(None), // some progress responses are ignored within to_block_stream_event - } -} - -impl<C: Blockchain> Stream for SubstreamsBlockStream<C> { - type Item = Result<BlockStreamEvent<C>, BlockStreamError>; - - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> { - self.stream.poll_next_unpin(cx) - } -} - -impl<C: Blockchain> BlockStream<C> for SubstreamsBlockStream<C> { - fn buffer_size_hint(&self) -> usize { - SUBSTREAMS_BUFFER_STREAM_SIZE - } -} - -pub struct SubstreamsLogData { - pub last_progress: Instant, - pub last_seen_block: u64, - pub trace_id: String, -} - -impl SubstreamsLogData { - fn new() -> SubstreamsLogData { - SubstreamsLogData { - last_progress: Instant::now(), - last_seen_block: 0, - trace_id: "".to_string(), - } - } - pub fn info_string(&self, progress: &ModulesProgress) -> String { - format!( - "Substreams backend graph_out last block is {}, {} stages, {} jobs", - self.last_seen_block, - progress.stages.len(), - progress.running_jobs.len() - ) - } - pub fn debug_string(&self, progress: &ModulesProgress) -> String { - let len = progress.stages.len(); - let mut stages_str = "".to_string(); - for i in (0..len).rev() { - let stage = &progress.stages[i]; - let range = if stage.completed_ranges.len() > 0 { - let b = stage.completed_ranges.iter().map(|x| x.end_block).min(); - format!(" up to {}", b.unwrap_or(0)) - } else { - "".to_string() - }; - let mlen = stage.modules.len(); - let module = if mlen == 0 { - "".to_string() - } else if mlen == 1 { - format!(" ({})", stage.modules[0]) - } else { - format!(" ({} +{})", stage.modules[mlen - 1], mlen - 1) - }; - if !stages_str.is_empty() { - stages_str.push_str(", "); - } - stages_str.push_str(&format!("#{}{}{}", i, range, module)); - } - let stage_str = if len > 0 { - format!(" Stages: [{}]", stages_str) - } else { - "".to_string() - }; - let mut jobs_str = "".to_string(); - let jlen = progress.running_jobs.len(); - for i in 0..jlen { - let job = &progress.running_jobs[i]; - if !jobs_str.is_empty() { - jobs_str.push_str(", "); - } - let duration_str = format_duration(Duration::from_millis(job.duration_ms)); - jobs_str.push_str(&format!( - "#{} on Stage {} @ {} | +{}|{} elapsed {}", - i, - job.stage, - job.start_block, - job.processed_blocks, - job.stop_block - job.start_block, - duration_str - )); - } - let job_str =
if jlen > 0 { - format!(", Jobs: [{}]", jobs_str) - } else { - "".to_string() - }; - format!( - "Substreams backend graph_out last block is {},{}{}", - self.last_seen_block, stage_str, job_str, - ) - } -} diff --git a/graph/src/components/subgraph/host.rs b/graph/src/components/subgraph/host.rs index f43c6aa3c00..0cba98912c7 100644 --- a/graph/src/components/subgraph/host.rs +++ b/graph/src/components/subgraph/host.rs @@ -6,7 +6,6 @@ use anyhow::Error; use async_trait::async_trait; use futures01::sync::mpsc; -use crate::blockchain::BlockTime; use crate::components::metrics::gas::GasMetrics; use crate::components::store::SubgraphFork; use crate::data_source::{ @@ -71,19 +70,6 @@ pub trait RuntimeHost: Send + Sync + 'static { logger: &Logger, ) -> Result>>, Error>; - async fn process_block( - &self, - logger: &Logger, - block_ptr: BlockPtr, - block_time: BlockTime, - block_data: Box<[u8]>, - handler: String, - state: BlockState, - proof_of_indexing: SharedProofOfIndexing, - debug_fork: &Option>, - instrument: bool, - ) -> Result; - async fn process_mapping_trigger( &self, logger: &Logger, diff --git a/graph/src/data/subgraph/mod.rs b/graph/src/data/subgraph/mod.rs index f8047c0a807..43b2b8e89dc 100644 --- a/graph/src/data/subgraph/mod.rs +++ b/graph/src/data/subgraph/mod.rs @@ -61,8 +61,6 @@ use std::sync::Arc; use super::{graphql::IntoValue, value::Word}; -pub const SUBSTREAMS_KIND: &str = "substreams"; - /// Deserialize an Address (with or without '0x' prefix). fn deserialize_address<'de, D>(deserializer: D) -> Result, D::Error> where @@ -1215,14 +1213,6 @@ impl UnresolvedSubgraphManifest { })) .await?; - let is_substreams = data_sources.iter().any(|ds| ds.kind() == SUBSTREAMS_KIND); - if is_substreams && data_sources.len() > 1 { - return Err(anyhow!( - "A Substreams-based subgraph can only contain a single data source." - ) - .into()); - } - for ds in &data_sources { ensure!( semver::VersionReq::parse(&format!("<= {}", ENV_VARS.mappings.max_api_version)) diff --git a/graph/src/endpoint.rs b/graph/src/endpoint.rs index a9fdd99a98c..cc6c3da99c7 100644 --- a/graph/src/endpoint.rs +++ b/graph/src/endpoint.rs @@ -31,7 +31,6 @@ pub struct RequestLabels { #[derive(Clone)] pub enum ConnectionType { Firehose, - Substreams, Rpc, } @@ -39,7 +38,6 @@ impl Into<&str> for &ConnectionType { fn into(self) -> &'static str { match self { ConnectionType::Firehose => "firehose", - ConnectionType::Substreams => "substreams", ConnectionType::Rpc => "rpc", } } diff --git a/graph/src/env/mod.rs b/graph/src/env/mod.rs index 09657c041f5..91cb2355088 100644 --- a/graph/src/env/mod.rs +++ b/graph/src/env/mod.rs @@ -206,8 +206,6 @@ pub struct EnvVars { /// Set by the env var `GRAPH_EXPERIMENTAL_SUBGRAPH_SETTINGS` which should point /// to a file with subgraph-specific settings pub subgraph_settings: Option, - /// Whether to prefer substreams blocks streams over firehose when available. - pub prefer_substreams_block_streams: bool, /// Set by the flag `GRAPH_ENABLE_DIPS_METRICS`. Whether to enable /// gas metrics. Off by default. 
pub enable_dips_metrics: bool, @@ -355,7 +353,6 @@ impl EnvVars { enable_sql_queries: inner.enable_sql_queries.0, ingestor_polling_interval: Duration::from_millis(inner.ingestor_polling_interval), subgraph_settings: inner.subgraph_settings, - prefer_substreams_block_streams: inner.prefer_substreams_block_streams, enable_dips_metrics: inner.enable_dips_metrics.0, history_blocks_override: inner.history_blocks_override, min_history_blocks: inner @@ -553,11 +550,6 @@ struct Inner { ingestor_polling_interval: u64, #[envconfig(from = "GRAPH_EXPERIMENTAL_SUBGRAPH_SETTINGS")] subgraph_settings: Option, - #[envconfig( - from = "GRAPH_EXPERIMENTAL_PREFER_SUBSTREAMS_BLOCK_STREAMS", - default = "false" - )] - prefer_substreams_block_streams: bool, #[envconfig(from = "GRAPH_ENABLE_DIPS_METRICS", default = "false")] enable_dips_metrics: EnvVarBoolean, #[envconfig(from = "GRAPH_HISTORY_BLOCKS_OVERRIDE")] diff --git a/graph/src/firehose/endpoints.rs b/graph/src/firehose/endpoints.rs index b05390154ed..76fefd61797 100644 --- a/graph/src/firehose/endpoints.rs +++ b/graph/src/firehose/endpoints.rs @@ -1,4 +1,3 @@ -use crate::firehose::codec::InfoRequest; use crate::firehose::fetch_client::FetchClient; use crate::firehose::interceptors::AuthInterceptor; use crate::{ @@ -11,7 +10,6 @@ use crate::{ env::ENV_VARS, firehose::decode_firehose_block, prelude::{anyhow, debug, DeploymentHash}, - substreams_rpc, }; use anyhow::Context; use async_trait::async_trait; @@ -53,7 +51,6 @@ pub struct FirehoseEndpoint { pub filters_enabled: bool, pub compression_enabled: bool, pub subgraph_limit: SubgraphLimit, - is_substreams: bool, endpoint_metrics: Arc, channel: Channel, @@ -185,7 +182,6 @@ impl FirehoseEndpoint { compression_enabled: bool, subgraph_limit: SubgraphLimit, endpoint_metrics: Arc, - is_substreams_endpoint: bool, ) -> Self { let uri = url .as_ref() @@ -254,7 +250,6 @@ impl FirehoseEndpoint { subgraph_limit, endpoint_metrics, info_response: OnceCell::new(), - is_substreams: is_substreams_endpoint, } } @@ -337,51 +332,6 @@ impl FirehoseEndpoint { client } - fn new_substreams_info_client( - &self, - ) -> crate::substreams_rpc::endpoint_info_client::EndpointInfoClient< - InterceptedService, impl tonic::service::Interceptor>, - > { - let metrics = self.metrics_interceptor(); - - let mut client = - crate::substreams_rpc::endpoint_info_client::EndpointInfoClient::with_interceptor( - metrics, - self.auth.clone(), - ) - .accept_compressed(CompressionEncoding::Gzip); - - if self.compression_enabled { - client = client.send_compressed(CompressionEncoding::Gzip); - } - - client = client.max_decoding_message_size(self.max_message_size()); - - client - } - - fn new_substreams_streaming_client( - &self, - ) -> substreams_rpc::stream_client::StreamClient< - InterceptedService, impl tonic::service::Interceptor>, - > { - let metrics = self.metrics_interceptor(); - - let mut client = substreams_rpc::stream_client::StreamClient::with_interceptor( - metrics, - self.auth.clone(), - ) - .accept_compressed(CompressionEncoding::Gzip); - - if self.compression_enabled { - client = client.send_compressed(CompressionEncoding::Gzip); - } - - client = client.max_decoding_message_size(self.max_message_size()); - - client - } - pub async fn get_block( &self, cursor: FirehoseCursor, @@ -687,19 +637,6 @@ impl FirehoseEndpoint { Ok(block_stream) } - pub async fn substreams( - self: Arc, - request: substreams_rpc::Request, - headers: &ConnectionHeaders, - ) -> Result, anyhow::Error> { - let mut client = self.new_substreams_streaming_client(); - let 
request = headers.add_to_request(request); - let response_stream = client.blocks(request).await?; - let block_stream = response_stream.into_inner(); - - Ok(block_stream) - } - pub async fn info( self: Arc, ) -> Result { @@ -707,20 +644,9 @@ impl FirehoseEndpoint { self.info_response .get_or_try_init(move || async move { - if endpoint.is_substreams { - let mut client = endpoint.new_substreams_info_client(); + let mut client = endpoint.new_firehose_info_client(); - client - .info(InfoRequest {}) - .await - .map(|r| r.into_inner()) - .map_err(anyhow::Error::from) - .and_then(|e| e.try_into()) - } else { - let mut client = endpoint.new_firehose_info_client(); - - client.info().await - } + client.info().await }) .await .map(ToOwned::to_owned) @@ -808,7 +734,6 @@ mod test { false, SubgraphLimit::Unlimited, Arc::new(EndpointMetrics::mock()), - false, ))]; let endpoints = FirehoseEndpoints::for_testing(endpoint); @@ -841,7 +766,6 @@ mod test { false, SubgraphLimit::Limit(2), Arc::new(EndpointMetrics::mock()), - false, ))]; let endpoints = FirehoseEndpoints::for_testing(endpoint); @@ -869,7 +793,6 @@ mod test { false, SubgraphLimit::Disabled, Arc::new(EndpointMetrics::mock()), - false, ))]; let endpoints = FirehoseEndpoints::for_testing(endpoint); @@ -896,7 +819,6 @@ mod test { false, SubgraphLimit::Unlimited, endpoint_metrics.clone(), - false, )); let high_error_adapter2 = Arc::new(FirehoseEndpoint::new( "high_error".to_string(), @@ -907,7 +829,6 @@ mod test { false, SubgraphLimit::Unlimited, endpoint_metrics.clone(), - false, )); let low_availability = Arc::new(FirehoseEndpoint::new( "low availability".to_string(), @@ -918,7 +839,6 @@ mod test { false, SubgraphLimit::Limit(2), endpoint_metrics.clone(), - false, )); let high_availability = Arc::new(FirehoseEndpoint::new( "high availability".to_string(), @@ -929,7 +849,6 @@ mod test { false, SubgraphLimit::Unlimited, endpoint_metrics.clone(), - false, )); endpoint_metrics.report_for_test(&high_error_adapter1.provider, false); diff --git a/graph/src/lib.rs b/graph/src/lib.rs index 822adac8e44..0607cab5937 100644 --- a/graph/src/lib.rs +++ b/graph/src/lib.rs @@ -24,10 +24,6 @@ pub mod runtime; pub mod firehose; -pub mod substreams; - -pub mod substreams_rpc; - pub mod endpoint; pub mod schema; diff --git a/graph/src/substreams/codec.rs b/graph/src/substreams/codec.rs deleted file mode 100644 index 23edcc3b7c1..00000000000 --- a/graph/src/substreams/codec.rs +++ /dev/null @@ -1,5 +0,0 @@ -#[rustfmt::skip] -#[path = "sf.substreams.v1.rs"] -mod pbsubstreams; - -pub use pbsubstreams::*; diff --git a/graph/src/substreams/mod.rs b/graph/src/substreams/mod.rs deleted file mode 100644 index a09801b91ee..00000000000 --- a/graph/src/substreams/mod.rs +++ /dev/null @@ -1,20 +0,0 @@ -mod codec; - -pub use codec::*; - -use self::module::input::{Input, Params}; - -/// Replace all the existing params with the provided ones. 
-pub fn patch_module_params(params: String, module: &mut Module) { - let mut inputs = vec![crate::substreams::module::Input { - input: Some(Input::Params(Params { value: params })), - }]; - - inputs.extend(module.inputs.iter().flat_map(|input| match input.input { - None => None, - Some(Input::Params(_)) => None, - Some(_) => Some(input.clone()), - })); - - module.inputs = inputs; -} diff --git a/graph/src/substreams/sf.substreams.v1.rs b/graph/src/substreams/sf.substreams.v1.rs deleted file mode 100644 index dd6b8930293..00000000000 --- a/graph/src/substreams/sf.substreams.v1.rs +++ /dev/null @@ -1,304 +0,0 @@ -// This file is @generated by prost-build. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct Package { - /// Needs to be one so this file can be used _directly_ as a - /// buf `Image` and/or a ProtoSet for grpcurl and other tools - #[prost(message, repeated, tag = "1")] - pub proto_files: ::prost::alloc::vec::Vec<::prost_types::FileDescriptorProto>, - #[prost(uint64, tag = "5")] - pub version: u64, - #[prost(message, optional, tag = "6")] - pub modules: ::core::option::Option<Modules>, - #[prost(message, repeated, tag = "7")] - pub module_meta: ::prost::alloc::vec::Vec<ModuleMetadata>, - #[prost(message, repeated, tag = "8")] - pub package_meta: ::prost::alloc::vec::Vec<PackageMetadata>, - /// Source network for Substreams to fetch its data from. - #[prost(string, tag = "9")] - pub network: ::prost::alloc::string::String, - #[prost(message, optional, tag = "10")] - pub sink_config: ::core::option::Option<::prost_types::Any>, - #[prost(string, tag = "11")] - pub sink_module: ::prost::alloc::string::String, -} -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct PackageMetadata { - #[prost(string, tag = "1")] - pub version: ::prost::alloc::string::String, - #[prost(string, tag = "2")] - pub url: ::prost::alloc::string::String, - #[prost(string, tag = "3")] - pub name: ::prost::alloc::string::String, - #[prost(string, tag = "4")] - pub doc: ::prost::alloc::string::String, -} -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ModuleMetadata { - /// Corresponds to the index in `Package.metadata.package_meta` - #[prost(uint64, tag = "1")] - pub package_index: u64, - #[prost(string, tag = "2")] - pub doc: ::prost::alloc::string::String, -} -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct Modules { - #[prost(message, repeated, tag = "1")] - pub modules: ::prost::alloc::vec::Vec<Module>, - #[prost(message, repeated, tag = "2")] - pub binaries: ::prost::alloc::vec::Vec<Binary>, -} -/// Binary represents some code compiled to its binary form. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct Binary { - #[prost(string, tag = "1")] - pub r#type: ::prost::alloc::string::String, - #[prost(bytes = "vec", tag = "2")] - pub content: ::prost::alloc::vec::Vec<u8>, -} -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct Module { - #[prost(string, tag = "1")] - pub name: ::prost::alloc::string::String, - #[prost(uint32, tag = "4")] - pub binary_index: u32, - #[prost(string, tag = "5")] - pub binary_entrypoint: ::prost::alloc::string::String, - #[prost(message, repeated, tag = "6")] - pub inputs: ::prost::alloc::vec::Vec<module::Input>, - #[prost(message, optional, tag = "7")] - pub output: ::core::option::Option<module::Output>, - #[prost(uint64, tag = "8")] - pub initial_block: u64, - #[prost(message, optional, tag = "9")] - pub block_filter: ::core::option::Option<module::BlockFilter>, - #[prost(oneof = "module::Kind", tags = "2, 3, 10")] - pub kind: ::core::option::Option<module::Kind>, -} -/// Nested message and enum types in `Module`.
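A usage sketch for `patch_module_params` above: given a module whose inputs are `[Params("old"), Map("block_to_pairs")]`, the call drops the old params, puts the new value at position 0, and keeps the non-params inputs in order. This assumes the `crate::substreams` paths from the deleted file and is illustrative only:

    use crate::substreams::module::input::{Input, Params};
    use crate::substreams::{patch_module_params, Module};

    fn example(module: &mut Module) {
        patch_module_params("new-value".to_string(), module);
        // The params input always ends up first; other inputs keep their order.
        assert!(matches!(
            module.inputs[0].input,
            Some(Input::Params(Params { ref value })) if value == "new-value"
        ));
    }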
-pub mod module { - #[derive(Clone, PartialEq, ::prost::Message)] - pub struct BlockFilter { - #[prost(string, tag = "1")] - pub module: ::prost::alloc::string::String, - #[prost(oneof = "block_filter::Query", tags = "2, 3")] - pub query: ::core::option::Option<block_filter::Query>, - } - /// Nested message and enum types in `BlockFilter`. - pub mod block_filter { - #[derive(Clone, PartialEq, ::prost::Oneof)] - pub enum Query { - #[prost(string, tag = "2")] - QueryString(::prost::alloc::string::String), - #[prost(message, tag = "3")] - QueryFromParams(super::QueryFromParams), - } - } - #[derive(Clone, Copy, PartialEq, ::prost::Message)] - pub struct QueryFromParams {} - #[derive(Clone, PartialEq, ::prost::Message)] - pub struct KindMap { - #[prost(string, tag = "1")] - pub output_type: ::prost::alloc::string::String, - } - #[derive(Clone, PartialEq, ::prost::Message)] - pub struct KindStore { - /// The `update_policy` determines the functions available to mutate the store - /// (like `set()`, `set_if_not_exists()` or `sum()`, etc.) in - /// order to ensure that parallel operations are possible and deterministic. - /// - /// Say a store accumulates keys from block 0 to 1M, and a second store - /// accumulates keys from block 1M to 2M. When we want to use this - /// store as a dependency for a downstream module, we will merge the - /// two stores according to this policy. - #[prost(enumeration = "kind_store::UpdatePolicy", tag = "1")] - pub update_policy: i32, - #[prost(string, tag = "2")] - pub value_type: ::prost::alloc::string::String, - } - /// Nested message and enum types in `KindStore`. - pub mod kind_store { - #[derive( - Clone, - Copy, - Debug, - PartialEq, - Eq, - Hash, - PartialOrd, - Ord, - ::prost::Enumeration - )] - #[repr(i32)] - pub enum UpdatePolicy { - Unset = 0, - /// Provides a store where you can `set()` keys, and the latest key wins - Set = 1, - /// Provides a store where you can `set_if_not_exists()` keys, and the first key wins - SetIfNotExists = 2, - /// Provides a store where you can `add_*()` keys, where two stores merge by summing their values. - Add = 3, - /// Provides a store where you can `min_*()` keys, where two stores merge by leaving the minimum value. - Min = 4, - /// Provides a store where you can `max_*()` keys, where two stores merge by leaving the maximum value. - Max = 5, - /// Provides a store where you can `append()` keys, where two stores merge by concatenating the bytes in order. - Append = 6, - /// Provides a store with both `set()` and `sum()` functions. - SetSum = 7, - } - impl UpdatePolicy { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. - pub fn as_str_name(&self) -> &'static str { - match self { - Self::Unset => "UPDATE_POLICY_UNSET", - Self::Set => "UPDATE_POLICY_SET", - Self::SetIfNotExists => "UPDATE_POLICY_SET_IF_NOT_EXISTS", - Self::Add => "UPDATE_POLICY_ADD", - Self::Min => "UPDATE_POLICY_MIN", - Self::Max => "UPDATE_POLICY_MAX", - Self::Append => "UPDATE_POLICY_APPEND", - Self::SetSum => "UPDATE_POLICY_SET_SUM", - } - } - /// Creates an enum from field names used in the ProtoBuf definition.
- pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "UPDATE_POLICY_UNSET" => Some(Self::Unset), - "UPDATE_POLICY_SET" => Some(Self::Set), - "UPDATE_POLICY_SET_IF_NOT_EXISTS" => Some(Self::SetIfNotExists), - "UPDATE_POLICY_ADD" => Some(Self::Add), - "UPDATE_POLICY_MIN" => Some(Self::Min), - "UPDATE_POLICY_MAX" => Some(Self::Max), - "UPDATE_POLICY_APPEND" => Some(Self::Append), - "UPDATE_POLICY_SET_SUM" => Some(Self::SetSum), - _ => None, - } - } - } - } - #[derive(Clone, PartialEq, ::prost::Message)] - pub struct KindBlockIndex { - #[prost(string, tag = "1")] - pub output_type: ::prost::alloc::string::String, - } - #[derive(Clone, PartialEq, ::prost::Message)] - pub struct Input { - #[prost(oneof = "input::Input", tags = "1, 2, 3, 4")] - pub input: ::core::option::Option, - } - /// Nested message and enum types in `Input`. - pub mod input { - #[derive(Clone, PartialEq, ::prost::Message)] - pub struct Source { - /// ex: "sf.ethereum.type.v1.Block" - #[prost(string, tag = "1")] - pub r#type: ::prost::alloc::string::String, - } - #[derive(Clone, PartialEq, ::prost::Message)] - pub struct Map { - /// ex: "block_to_pairs" - #[prost(string, tag = "1")] - pub module_name: ::prost::alloc::string::String, - } - #[derive(Clone, PartialEq, ::prost::Message)] - pub struct Store { - #[prost(string, tag = "1")] - pub module_name: ::prost::alloc::string::String, - #[prost(enumeration = "store::Mode", tag = "2")] - pub mode: i32, - } - /// Nested message and enum types in `Store`. - pub mod store { - #[derive( - Clone, - Copy, - Debug, - PartialEq, - Eq, - Hash, - PartialOrd, - Ord, - ::prost::Enumeration - )] - #[repr(i32)] - pub enum Mode { - Unset = 0, - Get = 1, - Deltas = 2, - } - impl Mode { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. - pub fn as_str_name(&self) -> &'static str { - match self { - Self::Unset => "UNSET", - Self::Get => "GET", - Self::Deltas => "DELTAS", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. 
- pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "UNSET" => Some(Self::Unset), - "GET" => Some(Self::Get), - "DELTAS" => Some(Self::Deltas), - _ => None, - } - } - } - } - #[derive(Clone, PartialEq, ::prost::Message)] - pub struct Params { - #[prost(string, tag = "1")] - pub value: ::prost::alloc::string::String, - } - #[derive(Clone, PartialEq, ::prost::Oneof)] - pub enum Input { - #[prost(message, tag = "1")] - Source(Source), - #[prost(message, tag = "2")] - Map(Map), - #[prost(message, tag = "3")] - Store(Store), - #[prost(message, tag = "4")] - Params(Params), - } - } - #[derive(Clone, PartialEq, ::prost::Message)] - pub struct Output { - #[prost(string, tag = "1")] - pub r#type: ::prost::alloc::string::String, - } - #[derive(Clone, PartialEq, ::prost::Oneof)] - pub enum Kind { - #[prost(message, tag = "2")] - KindMap(KindMap), - #[prost(message, tag = "3")] - KindStore(KindStore), - #[prost(message, tag = "10")] - KindBlockIndex(KindBlockIndex), - } -} -/// Clock is a pointer to a block with added timestamp -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct Clock { - #[prost(string, tag = "1")] - pub id: ::prost::alloc::string::String, - #[prost(uint64, tag = "2")] - pub number: u64, - #[prost(message, optional, tag = "3")] - pub timestamp: ::core::option::Option<::prost_types::Timestamp>, -} -/// BlockRef is a pointer to a block to which we don't know the timestamp -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct BlockRef { - #[prost(string, tag = "1")] - pub id: ::prost::alloc::string::String, - #[prost(uint64, tag = "2")] - pub number: u64, -} diff --git a/graph/src/substreams_rpc/codec.rs b/graph/src/substreams_rpc/codec.rs deleted file mode 100644 index d70a9e53762..00000000000 --- a/graph/src/substreams_rpc/codec.rs +++ /dev/null @@ -1,5 +0,0 @@ -#[rustfmt::skip] -#[path = "sf.substreams.rpc.v2.rs"] -mod pbsubstreamsrpc; - -pub use pbsubstreamsrpc::*; diff --git a/graph/src/substreams_rpc/mod.rs b/graph/src/substreams_rpc/mod.rs deleted file mode 100644 index 38e96fd598d..00000000000 --- a/graph/src/substreams_rpc/mod.rs +++ /dev/null @@ -1,3 +0,0 @@ -mod codec; - -pub use codec::*; diff --git a/graph/src/substreams_rpc/sf.firehose.v2.rs b/graph/src/substreams_rpc/sf.firehose.v2.rs deleted file mode 100644 index 905a7038bf5..00000000000 --- a/graph/src/substreams_rpc/sf.firehose.v2.rs +++ /dev/null @@ -1,896 +0,0 @@ -// This file is @generated by prost-build. -/// Generated client implementations. -pub mod stream_client { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - use tonic::codegen::http::Uri; - #[derive(Debug, Clone)] - pub struct StreamClient { - inner: tonic::client::Grpc, - } - impl StreamClient { - /// Attempt to create a new client by connecting to a given endpoint. 
- pub async fn connect(dst: D) -> Result - where - D: TryInto, - D::Error: Into, - { - let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; - Ok(Self::new(conn)) - } - } - impl StreamClient - where - T: tonic::client::GrpcService, - T::Error: Into, - T::ResponseBody: Body + std::marker::Send + 'static, - ::Error: Into + std::marker::Send, - { - pub fn new(inner: T) -> Self { - let inner = tonic::client::Grpc::new(inner); - Self { inner } - } - pub fn with_origin(inner: T, origin: Uri) -> Self { - let inner = tonic::client::Grpc::with_origin(inner, origin); - Self { inner } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> StreamClient> - where - F: tonic::service::Interceptor, - T::ResponseBody: Default, - T: tonic::codegen::Service< - http::Request, - Response = http::Response< - >::ResponseBody, - >, - >, - , - >>::Error: Into + std::marker::Send + std::marker::Sync, - { - StreamClient::new(InterceptedService::new(inner, interceptor)) - } - /// Compress requests with the given encoding. - /// - /// This requires the server to support it otherwise it might respond with an - /// error. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.send_compressed(encoding); - self - } - /// Enable decompressing responses. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.accept_compressed(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_decoding_message_size(limit); - self - } - /// Limits the maximum size of an encoded message. - /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_encoding_message_size(limit); - self - } - pub async fn blocks( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response>, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/sf.firehose.v2.Stream/Blocks", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("sf.firehose.v2.Stream", "Blocks")); - self.inner.server_streaming(req, path, codec).await - } - } -} -/// Generated client implementations. -pub mod fetch_client { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - use tonic::codegen::http::Uri; - #[derive(Debug, Clone)] - pub struct FetchClient { - inner: tonic::client::Grpc, - } - impl FetchClient { - /// Attempt to create a new client by connecting to a given endpoint. 
- pub async fn connect(dst: D) -> Result - where - D: TryInto, - D::Error: Into, - { - let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; - Ok(Self::new(conn)) - } - } - impl FetchClient - where - T: tonic::client::GrpcService, - T::Error: Into, - T::ResponseBody: Body + std::marker::Send + 'static, - ::Error: Into + std::marker::Send, - { - pub fn new(inner: T) -> Self { - let inner = tonic::client::Grpc::new(inner); - Self { inner } - } - pub fn with_origin(inner: T, origin: Uri) -> Self { - let inner = tonic::client::Grpc::with_origin(inner, origin); - Self { inner } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> FetchClient> - where - F: tonic::service::Interceptor, - T::ResponseBody: Default, - T: tonic::codegen::Service< - http::Request, - Response = http::Response< - >::ResponseBody, - >, - >, - , - >>::Error: Into + std::marker::Send + std::marker::Sync, - { - FetchClient::new(InterceptedService::new(inner, interceptor)) - } - /// Compress requests with the given encoding. - /// - /// This requires the server to support it otherwise it might respond with an - /// error. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.send_compressed(encoding); - self - } - /// Enable decompressing responses. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.accept_compressed(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_decoding_message_size(limit); - self - } - /// Limits the maximum size of an encoded message. - /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_encoding_message_size(limit); - self - } - pub async fn block( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/sf.firehose.v2.Fetch/Block", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("sf.firehose.v2.Fetch", "Block")); - self.inner.unary(req, path, codec).await - } - } -} -/// Generated client implementations. -pub mod endpoint_info_client { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - use tonic::codegen::http::Uri; - #[derive(Debug, Clone)] - pub struct EndpointInfoClient { - inner: tonic::client::Grpc, - } - impl EndpointInfoClient { - /// Attempt to create a new client by connecting to a given endpoint. 
- pub async fn connect(dst: D) -> Result - where - D: TryInto, - D::Error: Into, - { - let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; - Ok(Self::new(conn)) - } - } - impl EndpointInfoClient - where - T: tonic::client::GrpcService, - T::Error: Into, - T::ResponseBody: Body + std::marker::Send + 'static, - ::Error: Into + std::marker::Send, - { - pub fn new(inner: T) -> Self { - let inner = tonic::client::Grpc::new(inner); - Self { inner } - } - pub fn with_origin(inner: T, origin: Uri) -> Self { - let inner = tonic::client::Grpc::with_origin(inner, origin); - Self { inner } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> EndpointInfoClient> - where - F: tonic::service::Interceptor, - T::ResponseBody: Default, - T: tonic::codegen::Service< - http::Request, - Response = http::Response< - >::ResponseBody, - >, - >, - , - >>::Error: Into + std::marker::Send + std::marker::Sync, - { - EndpointInfoClient::new(InterceptedService::new(inner, interceptor)) - } - /// Compress requests with the given encoding. - /// - /// This requires the server to support it otherwise it might respond with an - /// error. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.send_compressed(encoding); - self - } - /// Enable decompressing responses. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.accept_compressed(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_decoding_message_size(limit); - self - } - /// Limits the maximum size of an encoded message. - /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_encoding_message_size(limit); - self - } - pub async fn info( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/sf.firehose.v2.EndpointInfo/Info", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("sf.firehose.v2.EndpointInfo", "Info")); - self.inner.unary(req, path, codec).await - } - } -} -/// Generated server implementations. -pub mod stream_server { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - /// Generated trait containing gRPC methods that should be implemented for use with StreamServer. - #[async_trait] - pub trait Stream: std::marker::Send + std::marker::Sync + 'static { - /// Server streaming response type for the Blocks method. 
- type BlocksStream: tonic::codegen::tokio_stream::Stream< - Item = std::result::Result, - > - + std::marker::Send - + 'static; - async fn blocks( - &self, - request: tonic::Request, - ) -> std::result::Result, tonic::Status>; - } - #[derive(Debug)] - pub struct StreamServer { - inner: Arc, - accept_compression_encodings: EnabledCompressionEncodings, - send_compression_encodings: EnabledCompressionEncodings, - max_decoding_message_size: Option, - max_encoding_message_size: Option, - } - impl StreamServer { - pub fn new(inner: T) -> Self { - Self::from_arc(Arc::new(inner)) - } - pub fn from_arc(inner: Arc) -> Self { - Self { - inner, - accept_compression_encodings: Default::default(), - send_compression_encodings: Default::default(), - max_decoding_message_size: None, - max_encoding_message_size: None, - } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> InterceptedService - where - F: tonic::service::Interceptor, - { - InterceptedService::new(Self::new(inner), interceptor) - } - /// Enable decompressing requests with the given encoding. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.accept_compression_encodings.enable(encoding); - self - } - /// Compress responses with the given encoding, if the client supports it. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.send_compression_encodings.enable(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.max_decoding_message_size = Some(limit); - self - } - /// Limits the maximum size of an encoded message. - /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.max_encoding_message_size = Some(limit); - self - } - } - impl tonic::codegen::Service> for StreamServer - where - T: Stream, - B: Body + std::marker::Send + 'static, - B::Error: Into + std::marker::Send + 'static, - { - type Response = http::Response; - type Error = std::convert::Infallible; - type Future = BoxFuture; - fn poll_ready( - &mut self, - _cx: &mut Context<'_>, - ) -> Poll> { - Poll::Ready(Ok(())) - } - fn call(&mut self, req: http::Request) -> Self::Future { - match req.uri().path() { - "/sf.firehose.v2.Stream/Blocks" => { - #[allow(non_camel_case_types)] - struct BlocksSvc(pub Arc); - impl< - T: Stream, - > tonic::server::ServerStreamingService - for BlocksSvc { - type Response = crate::firehose::Response; - type ResponseStream = T::BlocksStream; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::blocks(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = BlocksSvc(inner); - let codec = tonic::codec::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let 
res = grpc.server_streaming(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - _ => { - Box::pin(async move { - let mut response = http::Response::new(empty_body()); - let headers = response.headers_mut(); - headers - .insert( - tonic::Status::GRPC_STATUS, - (tonic::Code::Unimplemented as i32).into(), - ); - headers - .insert( - http::header::CONTENT_TYPE, - tonic::metadata::GRPC_CONTENT_TYPE, - ); - Ok(response) - }) - } - } - } - } - impl Clone for StreamServer { - fn clone(&self) -> Self { - let inner = self.inner.clone(); - Self { - inner, - accept_compression_encodings: self.accept_compression_encodings, - send_compression_encodings: self.send_compression_encodings, - max_decoding_message_size: self.max_decoding_message_size, - max_encoding_message_size: self.max_encoding_message_size, - } - } - } - /// Generated gRPC service name - pub const SERVICE_NAME: &str = "sf.firehose.v2.Stream"; - impl tonic::server::NamedService for StreamServer { - const NAME: &'static str = SERVICE_NAME; - } -} -/// Generated server implementations. -pub mod fetch_server { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - /// Generated trait containing gRPC methods that should be implemented for use with FetchServer. - #[async_trait] - pub trait Fetch: std::marker::Send + std::marker::Sync + 'static { - async fn block( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - } - #[derive(Debug)] - pub struct FetchServer { - inner: Arc, - accept_compression_encodings: EnabledCompressionEncodings, - send_compression_encodings: EnabledCompressionEncodings, - max_decoding_message_size: Option, - max_encoding_message_size: Option, - } - impl FetchServer { - pub fn new(inner: T) -> Self { - Self::from_arc(Arc::new(inner)) - } - pub fn from_arc(inner: Arc) -> Self { - Self { - inner, - accept_compression_encodings: Default::default(), - send_compression_encodings: Default::default(), - max_decoding_message_size: None, - max_encoding_message_size: None, - } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> InterceptedService - where - F: tonic::service::Interceptor, - { - InterceptedService::new(Self::new(inner), interceptor) - } - /// Enable decompressing requests with the given encoding. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.accept_compression_encodings.enable(encoding); - self - } - /// Compress responses with the given encoding, if the client supports it. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.send_compression_encodings.enable(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.max_decoding_message_size = Some(limit); - self - } - /// Limits the maximum size of an encoded message. 
- /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.max_encoding_message_size = Some(limit); - self - } - } - impl tonic::codegen::Service> for FetchServer - where - T: Fetch, - B: Body + std::marker::Send + 'static, - B::Error: Into + std::marker::Send + 'static, - { - type Response = http::Response; - type Error = std::convert::Infallible; - type Future = BoxFuture; - fn poll_ready( - &mut self, - _cx: &mut Context<'_>, - ) -> Poll> { - Poll::Ready(Ok(())) - } - fn call(&mut self, req: http::Request) -> Self::Future { - match req.uri().path() { - "/sf.firehose.v2.Fetch/Block" => { - #[allow(non_camel_case_types)] - struct BlockSvc(pub Arc); - impl< - T: Fetch, - > tonic::server::UnaryService - for BlockSvc { - type Response = crate::firehose::SingleBlockResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::block(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = BlockSvc(inner); - let codec = tonic::codec::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - _ => { - Box::pin(async move { - let mut response = http::Response::new(empty_body()); - let headers = response.headers_mut(); - headers - .insert( - tonic::Status::GRPC_STATUS, - (tonic::Code::Unimplemented as i32).into(), - ); - headers - .insert( - http::header::CONTENT_TYPE, - tonic::metadata::GRPC_CONTENT_TYPE, - ); - Ok(response) - }) - } - } - } - } - impl Clone for FetchServer { - fn clone(&self) -> Self { - let inner = self.inner.clone(); - Self { - inner, - accept_compression_encodings: self.accept_compression_encodings, - send_compression_encodings: self.send_compression_encodings, - max_decoding_message_size: self.max_decoding_message_size, - max_encoding_message_size: self.max_encoding_message_size, - } - } - } - /// Generated gRPC service name - pub const SERVICE_NAME: &str = "sf.firehose.v2.Fetch"; - impl tonic::server::NamedService for FetchServer { - const NAME: &'static str = SERVICE_NAME; - } -} -/// Generated server implementations. -pub mod endpoint_info_server { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - /// Generated trait containing gRPC methods that should be implemented for use with EndpointInfoServer. 
- #[async_trait] - pub trait EndpointInfo: std::marker::Send + std::marker::Sync + 'static { - async fn info( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - } - #[derive(Debug)] - pub struct EndpointInfoServer { - inner: Arc, - accept_compression_encodings: EnabledCompressionEncodings, - send_compression_encodings: EnabledCompressionEncodings, - max_decoding_message_size: Option, - max_encoding_message_size: Option, - } - impl EndpointInfoServer { - pub fn new(inner: T) -> Self { - Self::from_arc(Arc::new(inner)) - } - pub fn from_arc(inner: Arc) -> Self { - Self { - inner, - accept_compression_encodings: Default::default(), - send_compression_encodings: Default::default(), - max_decoding_message_size: None, - max_encoding_message_size: None, - } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> InterceptedService - where - F: tonic::service::Interceptor, - { - InterceptedService::new(Self::new(inner), interceptor) - } - /// Enable decompressing requests with the given encoding. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.accept_compression_encodings.enable(encoding); - self - } - /// Compress responses with the given encoding, if the client supports it. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.send_compression_encodings.enable(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.max_decoding_message_size = Some(limit); - self - } - /// Limits the maximum size of an encoded message. - /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.max_encoding_message_size = Some(limit); - self - } - } - impl tonic::codegen::Service> for EndpointInfoServer - where - T: EndpointInfo, - B: Body + std::marker::Send + 'static, - B::Error: Into + std::marker::Send + 'static, - { - type Response = http::Response; - type Error = std::convert::Infallible; - type Future = BoxFuture; - fn poll_ready( - &mut self, - _cx: &mut Context<'_>, - ) -> Poll> { - Poll::Ready(Ok(())) - } - fn call(&mut self, req: http::Request) -> Self::Future { - match req.uri().path() { - "/sf.firehose.v2.EndpointInfo/Info" => { - #[allow(non_camel_case_types)] - struct InfoSvc(pub Arc); - impl< - T: EndpointInfo, - > tonic::server::UnaryService - for InfoSvc { - type Response = crate::firehose::InfoResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::info(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = InfoSvc(inner); - let codec = tonic::codec::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, 
req).await; - Ok(res) - }; - Box::pin(fut) - } - _ => { - Box::pin(async move { - let mut response = http::Response::new(empty_body()); - let headers = response.headers_mut(); - headers - .insert( - tonic::Status::GRPC_STATUS, - (tonic::Code::Unimplemented as i32).into(), - ); - headers - .insert( - http::header::CONTENT_TYPE, - tonic::metadata::GRPC_CONTENT_TYPE, - ); - Ok(response) - }) - } - } - } - } - impl<T> Clone for EndpointInfoServer<T> { - fn clone(&self) -> Self { - let inner = self.inner.clone(); - Self { - inner, - accept_compression_encodings: self.accept_compression_encodings, - send_compression_encodings: self.send_compression_encodings, - max_decoding_message_size: self.max_decoding_message_size, - max_encoding_message_size: self.max_encoding_message_size, - } - } - } - /// Generated gRPC service name - pub const SERVICE_NAME: &str = "sf.firehose.v2.EndpointInfo"; - impl<T> tonic::server::NamedService for EndpointInfoServer<T> { - const NAME: &'static str = SERVICE_NAME; - } -} diff --git a/graph/src/substreams_rpc/sf.substreams.rpc.v2.rs b/graph/src/substreams_rpc/sf.substreams.rpc.v2.rs deleted file mode 100644 index ff69b343d29..00000000000 --- a/graph/src/substreams_rpc/sf.substreams.rpc.v2.rs +++ /dev/null @@ -1,946 +0,0 @@ -// This file is @generated by prost-build. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct Request { - #[prost(int64, tag = "1")] - pub start_block_num: i64, - #[prost(string, tag = "2")] - pub start_cursor: ::prost::alloc::string::String, - #[prost(uint64, tag = "3")] - pub stop_block_num: u64, - /// With final_block_only, you only receive blocks that are irreversible: - /// 'final_block_height' will be equal to the current block and no 'undo_signal' - /// will ever be sent - #[prost(bool, tag = "4")] - pub final_blocks_only: bool, - /// Substreams has two modes when executing your module(s): either development - /// mode or production mode. Development and production modes impact the - /// execution of Substreams; important aspects of execution include: - /// * The time required to reach the first byte. - /// * The speed at which large ranges get executed. - /// * The module logs and outputs sent back to the client. - /// - /// By default, the engine runs in developer mode, with richer and deeper - /// output. Differences between production and development modes include: - /// * Forward parallel execution is enabled in production mode and disabled in - /// development mode. - /// * The time required to reach the first byte in development mode is faster - /// than in production mode. - /// - /// Specific attributes of development mode include: - /// * The client will receive all of the executed module's logs. - /// * It's possible to request specific store snapshots in the execution tree - /// (via `debug_initial_store_snapshot_for_modules`). - /// * Output from multiple modules is possible. - /// - /// With production mode, however, you trade off functionality for high speed, - /// enabling forward parallel execution of modules ahead of time.
- #[prost(bool, tag = "5")] - pub production_mode: bool, - #[prost(string, tag = "6")] - pub output_module: ::prost::alloc::string::String, - #[prost(message, optional, tag = "7")] - pub modules: ::core::option::Option, - /// Available only in developer mode - #[prost(string, repeated, tag = "10")] - pub debug_initial_store_snapshot_for_modules: ::prost::alloc::vec::Vec< - ::prost::alloc::string::String, - >, -} -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct Response { - #[prost(oneof = "response::Message", tags = "1, 2, 3, 4, 5, 10, 11")] - pub message: ::core::option::Option, -} -/// Nested message and enum types in `Response`. -pub mod response { - #[derive(Clone, PartialEq, ::prost::Oneof)] - pub enum Message { - /// Always sent first - #[prost(message, tag = "1")] - Session(super::SessionInit), - /// Progress of data preparation, before - #[prost(message, tag = "2")] - Progress(super::ModulesProgress), - /// sending in the stream of `data` events. - #[prost(message, tag = "3")] - BlockScopedData(super::BlockScopedData), - #[prost(message, tag = "4")] - BlockUndoSignal(super::BlockUndoSignal), - #[prost(message, tag = "5")] - FatalError(super::Error), - /// Available only in developer mode, and only if - /// `debug_initial_store_snapshot_for_modules` is set. - #[prost(message, tag = "10")] - DebugSnapshotData(super::InitialSnapshotData), - /// Available only in developer mode, and only if - /// `debug_initial_store_snapshot_for_modules` is set. - #[prost(message, tag = "11")] - DebugSnapshotComplete(super::InitialSnapshotComplete), - } -} -/// BlockUndoSignal informs you that every bit of data -/// with a block number above 'last_valid_block' has been reverted -/// on-chain. Delete that data and restart from 'last_valid_cursor' -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct BlockUndoSignal { - #[prost(message, optional, tag = "1")] - pub last_valid_block: ::core::option::Option, - #[prost(string, tag = "2")] - pub last_valid_cursor: ::prost::alloc::string::String, -} -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct BlockScopedData { - #[prost(message, optional, tag = "1")] - pub output: ::core::option::Option, - #[prost(message, optional, tag = "2")] - pub clock: ::core::option::Option, - #[prost(string, tag = "3")] - pub cursor: ::prost::alloc::string::String, - /// Non-deterministic, allows substreams-sink to let go of their undo data. 
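// Editor's note: a sketch of how a consumer is expected to honor the
// BlockUndoSignal defined above: delete everything above `last_valid_block`
// and resume from `last_valid_cursor`. `revert_to` and `save_cursor` are
// hypothetical sink hooks, not part of this diff:
//
//     match response.message {
//         Some(response::Message::BlockUndoSignal(undo)) => {
//             let valid = undo.last_valid_block.map(|b| b.number).unwrap_or(0);
//             revert_to(valid);                    // drop data above this block
//             save_cursor(undo.last_valid_cursor); // restart point after a crash
//         }
//         Some(response::Message::BlockScopedData(data)) => { /* apply outputs */ }
//         _ => {}
//     }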
- #[prost(uint64, tag = "4")] - pub final_block_height: u64, - #[prost(message, repeated, tag = "10")] - pub debug_map_outputs: ::prost::alloc::vec::Vec, - #[prost(message, repeated, tag = "11")] - pub debug_store_outputs: ::prost::alloc::vec::Vec, -} -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SessionInit { - #[prost(string, tag = "1")] - pub trace_id: ::prost::alloc::string::String, - #[prost(uint64, tag = "2")] - pub resolved_start_block: u64, - #[prost(uint64, tag = "3")] - pub linear_handoff_block: u64, - #[prost(uint64, tag = "4")] - pub max_parallel_workers: u64, -} -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct InitialSnapshotComplete { - #[prost(string, tag = "1")] - pub cursor: ::prost::alloc::string::String, -} -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct InitialSnapshotData { - #[prost(string, tag = "1")] - pub module_name: ::prost::alloc::string::String, - #[prost(message, repeated, tag = "2")] - pub deltas: ::prost::alloc::vec::Vec, - #[prost(uint64, tag = "4")] - pub sent_keys: u64, - #[prost(uint64, tag = "3")] - pub total_keys: u64, -} -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct MapModuleOutput { - #[prost(string, tag = "1")] - pub name: ::prost::alloc::string::String, - #[prost(message, optional, tag = "2")] - pub map_output: ::core::option::Option<::prost_types::Any>, - /// DebugOutputInfo is available in non-production mode only - #[prost(message, optional, tag = "10")] - pub debug_info: ::core::option::Option, -} -/// StoreModuleOutput are produced for store modules in development mode. -/// It is not possible to retrieve store models in production, with -/// parallelization enabled. If you need the deltas directly, write a pass -/// through mapper module that will get them down to you. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct StoreModuleOutput { - #[prost(string, tag = "1")] - pub name: ::prost::alloc::string::String, - #[prost(message, repeated, tag = "2")] - pub debug_store_deltas: ::prost::alloc::vec::Vec, - #[prost(message, optional, tag = "10")] - pub debug_info: ::core::option::Option, -} -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct OutputDebugInfo { - #[prost(string, repeated, tag = "1")] - pub logs: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, - /// LogsTruncated is a flag that tells you if you received all the logs or if - /// they were truncated because you logged too much (fixed limit currently is - /// set to 128 KiB). 
- #[prost(bool, tag = "2")] - pub logs_truncated: bool, - #[prost(bool, tag = "3")] - pub cached: bool, -} -/// ModulesProgress is a message that is sent every 500ms -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ModulesProgress { - /// List of jobs running on tier2 servers - #[prost(message, repeated, tag = "2")] - pub running_jobs: ::prost::alloc::vec::Vec, - /// Execution statistics for each module - #[prost(message, repeated, tag = "3")] - pub modules_stats: ::prost::alloc::vec::Vec, - /// Stages definition and completed block ranges - #[prost(message, repeated, tag = "4")] - pub stages: ::prost::alloc::vec::Vec, - #[prost(message, optional, tag = "5")] - pub processed_bytes: ::core::option::Option, -} -#[derive(Clone, Copy, PartialEq, ::prost::Message)] -pub struct ProcessedBytes { - #[prost(uint64, tag = "1")] - pub total_bytes_read: u64, - #[prost(uint64, tag = "2")] - pub total_bytes_written: u64, -} -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct Error { - #[prost(string, tag = "1")] - pub module: ::prost::alloc::string::String, - #[prost(string, tag = "2")] - pub reason: ::prost::alloc::string::String, - #[prost(string, repeated, tag = "3")] - pub logs: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, - /// FailureLogsTruncated is a flag that tells you if you received all the logs - /// or if they were truncated because you logged too much (fixed limit - /// currently is set to 128 KiB). - #[prost(bool, tag = "4")] - pub logs_truncated: bool, -} -#[derive(Clone, Copy, PartialEq, ::prost::Message)] -pub struct Job { - #[prost(uint32, tag = "1")] - pub stage: u32, - #[prost(uint64, tag = "2")] - pub start_block: u64, - #[prost(uint64, tag = "3")] - pub stop_block: u64, - #[prost(uint64, tag = "4")] - pub processed_blocks: u64, - #[prost(uint64, tag = "5")] - pub duration_ms: u64, -} -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct Stage { - #[prost(string, repeated, tag = "1")] - pub modules: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, - #[prost(message, repeated, tag = "2")] - pub completed_ranges: ::prost::alloc::vec::Vec, -} -/// ModuleStats gathers metrics and statistics from each module, running on tier1 -/// or tier2 All the 'count' and 'time_ms' values may include duplicate for each -/// stage going over that module -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ModuleStats { - /// name of the module - #[prost(string, tag = "1")] - pub name: ::prost::alloc::string::String, - /// total_processed_blocks is the sum of blocks sent to that module code - #[prost(uint64, tag = "2")] - pub total_processed_block_count: u64, - /// total_processing_time_ms is the sum of all time spent running that module - /// code - #[prost(uint64, tag = "3")] - pub total_processing_time_ms: u64, - /// // external_calls are chain-specific intrinsics, like "Ethereum RPC calls". - #[prost(message, repeated, tag = "4")] - pub external_call_metrics: ::prost::alloc::vec::Vec, - /// total_store_operation_time_ms is the sum of all time spent running that - /// module code waiting for a store operation (ex: read, write, delete...) 
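Since ModulesProgress above arrives roughly every 500ms, a consumer might reduce it to a log line rather than store it. A sketch using only fields defined in this file (log_progress is a hypothetical helper name):

    fn log_progress(progress: &ModulesProgress) {
        // One line per tier2 job currently running.
        for job in &progress.running_jobs {
            println!(
                "stage {}: blocks {}..{} ({} processed, {} ms)",
                job.stage, job.start_block, job.stop_block,
                job.processed_blocks, job.duration_ms
            );
        }
        // Aggregate I/O counters, when present.
        if let Some(bytes) = &progress.processed_bytes {
            println!(
                "{} bytes read, {} bytes written",
                bytes.total_bytes_read, bytes.total_bytes_written
            );
        }
    }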
- #[prost(uint64, tag = "5")] - pub total_store_operation_time_ms: u64, - /// total_store_read_count is the sum of all the store Read operations called - /// from that module code - #[prost(uint64, tag = "6")] - pub total_store_read_count: u64, - /// total_store_write_count is the sum of all store Write operations called - /// from that module code (store-only) - #[prost(uint64, tag = "10")] - pub total_store_write_count: u64, - /// total_store_deleteprefix_count is the sum of all store DeletePrefix - /// operations called from that module code (store-only) note that DeletePrefix - /// can be a costly operation on large stores - #[prost(uint64, tag = "11")] - pub total_store_deleteprefix_count: u64, - /// store_size_bytes is the uncompressed size of the full KV store for that - /// module, from the last 'merge' operation (store-only) - #[prost(uint64, tag = "12")] - pub store_size_bytes: u64, - /// total_store_merging_time_ms is the time spent merging partial stores into a - /// full KV store for that module (store-only) - #[prost(uint64, tag = "13")] - pub total_store_merging_time_ms: u64, - /// store_currently_merging is true if there is a merging operation (partial - /// store to full KV store) on the way. - #[prost(bool, tag = "14")] - pub store_currently_merging: bool, - /// highest_contiguous_block is the highest block in the highest merged full KV - /// store of that module (store-only) - #[prost(uint64, tag = "15")] - pub highest_contiguous_block: u64, -} -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ExternalCallMetric { - #[prost(string, tag = "1")] - pub name: ::prost::alloc::string::String, - #[prost(uint64, tag = "2")] - pub count: u64, - #[prost(uint64, tag = "3")] - pub time_ms: u64, -} -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct StoreDelta { - #[prost(enumeration = "store_delta::Operation", tag = "1")] - pub operation: i32, - #[prost(uint64, tag = "2")] - pub ordinal: u64, - #[prost(string, tag = "3")] - pub key: ::prost::alloc::string::String, - #[prost(bytes = "vec", tag = "4")] - pub old_value: ::prost::alloc::vec::Vec, - #[prost(bytes = "vec", tag = "5")] - pub new_value: ::prost::alloc::vec::Vec, -} -/// Nested message and enum types in `StoreDelta`. -pub mod store_delta { - #[derive( - Clone, - Copy, - Debug, - PartialEq, - Eq, - Hash, - PartialOrd, - Ord, - ::prost::Enumeration - )] - #[repr(i32)] - pub enum Operation { - Unset = 0, - Create = 1, - Update = 2, - Delete = 3, - } - impl Operation { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. - pub fn as_str_name(&self) -> &'static str { - match self { - Self::Unset => "UNSET", - Self::Create => "CREATE", - Self::Update => "UPDATE", - Self::Delete => "DELETE", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. - pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "UNSET" => Some(Self::Unset), - "CREATE" => Some(Self::Create), - "UPDATE" => Some(Self::Update), - "DELETE" => Some(Self::Delete), - _ => None, - } - } - } -} -#[derive(Clone, Copy, PartialEq, ::prost::Message)] -pub struct BlockRange { - #[prost(uint64, tag = "2")] - pub start_block: u64, - #[prost(uint64, tag = "3")] - pub end_block: u64, -} -/// Generated client implementations. 
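One note on StoreDelta before the generated clients below: its operation field is a raw i32 over the Operation enum above. A sketch of applying deltas to a local map, comparing against the enum discriminants so it does not depend on any particular prost conversion API:

    use std::collections::HashMap;

    fn apply_delta(delta: &StoreDelta, kv: &mut HashMap<String, Vec<u8>>) {
        use store_delta::Operation;
        let op = delta.operation;
        if op == Operation::Create as i32 || op == Operation::Update as i32 {
            kv.insert(delta.key.clone(), delta.new_value.clone());
        } else if op == Operation::Delete as i32 {
            kv.remove(&delta.key);
        } // Operation::Unset and unknown values are ignored
    }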
-pub mod endpoint_info_client { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - use tonic::codegen::http::Uri; - #[derive(Debug, Clone)] - pub struct EndpointInfoClient { - inner: tonic::client::Grpc, - } - impl EndpointInfoClient { - /// Attempt to create a new client by connecting to a given endpoint. - pub async fn connect(dst: D) -> Result - where - D: TryInto, - D::Error: Into, - { - let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; - Ok(Self::new(conn)) - } - } - impl EndpointInfoClient - where - T: tonic::client::GrpcService, - T::Error: Into, - T::ResponseBody: Body + std::marker::Send + 'static, - ::Error: Into + std::marker::Send, - { - pub fn new(inner: T) -> Self { - let inner = tonic::client::Grpc::new(inner); - Self { inner } - } - pub fn with_origin(inner: T, origin: Uri) -> Self { - let inner = tonic::client::Grpc::with_origin(inner, origin); - Self { inner } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> EndpointInfoClient> - where - F: tonic::service::Interceptor, - T::ResponseBody: Default, - T: tonic::codegen::Service< - http::Request, - Response = http::Response< - >::ResponseBody, - >, - >, - , - >>::Error: Into + std::marker::Send + std::marker::Sync, - { - EndpointInfoClient::new(InterceptedService::new(inner, interceptor)) - } - /// Compress requests with the given encoding. - /// - /// This requires the server to support it otherwise it might respond with an - /// error. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.send_compressed(encoding); - self - } - /// Enable decompressing responses. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.accept_compressed(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_decoding_message_size(limit); - self - } - /// Limits the maximum size of an encoded message. - /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_encoding_message_size(limit); - self - } - pub async fn info( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/sf.substreams.rpc.v2.EndpointInfo/Info", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("sf.substreams.rpc.v2.EndpointInfo", "Info")); - self.inner.unary(req, path, codec).await - } - } -} -/// Generated client implementations. -pub mod stream_client { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - use tonic::codegen::http::Uri; - #[derive(Debug, Clone)] - pub struct StreamClient { - inner: tonic::client::Grpc, - } - impl StreamClient { - /// Attempt to create a new client by connecting to a given endpoint. 
- pub async fn connect(dst: D) -> Result - where - D: TryInto, - D::Error: Into, - { - let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; - Ok(Self::new(conn)) - } - } - impl StreamClient - where - T: tonic::client::GrpcService, - T::Error: Into, - T::ResponseBody: Body + std::marker::Send + 'static, - ::Error: Into + std::marker::Send, - { - pub fn new(inner: T) -> Self { - let inner = tonic::client::Grpc::new(inner); - Self { inner } - } - pub fn with_origin(inner: T, origin: Uri) -> Self { - let inner = tonic::client::Grpc::with_origin(inner, origin); - Self { inner } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> StreamClient> - where - F: tonic::service::Interceptor, - T::ResponseBody: Default, - T: tonic::codegen::Service< - http::Request, - Response = http::Response< - >::ResponseBody, - >, - >, - , - >>::Error: Into + std::marker::Send + std::marker::Sync, - { - StreamClient::new(InterceptedService::new(inner, interceptor)) - } - /// Compress requests with the given encoding. - /// - /// This requires the server to support it otherwise it might respond with an - /// error. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.send_compressed(encoding); - self - } - /// Enable decompressing responses. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.accept_compressed(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_decoding_message_size(limit); - self - } - /// Limits the maximum size of an encoded message. - /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_encoding_message_size(limit); - self - } - pub async fn blocks( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response>, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/sf.substreams.rpc.v2.Stream/Blocks", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("sf.substreams.rpc.v2.Stream", "Blocks")); - self.inner.server_streaming(req, path, codec).await - } - } -} -/// Generated server implementations. -pub mod endpoint_info_server { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - /// Generated trait containing gRPC methods that should be implemented for use with EndpointInfoServer. 
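For completeness, the deleted StreamClient above was driven like any tonic server-streaming client. A sketch, assuming an async context, an illustrative endpoint URL, and default Request fields:

    let mut client =
        stream_client::StreamClient::connect("http://localhost:9000").await?;
    let mut stream = client
        .blocks(Request::default())
        .await?
        .into_inner();
    while let Some(response) = stream.message().await? {
        // `response.message` is the Response oneof handled further above.
    }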
- #[async_trait] - pub trait EndpointInfo: std::marker::Send + std::marker::Sync + 'static { - async fn info( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - } - #[derive(Debug)] - pub struct EndpointInfoServer { - inner: Arc, - accept_compression_encodings: EnabledCompressionEncodings, - send_compression_encodings: EnabledCompressionEncodings, - max_decoding_message_size: Option, - max_encoding_message_size: Option, - } - impl EndpointInfoServer { - pub fn new(inner: T) -> Self { - Self::from_arc(Arc::new(inner)) - } - pub fn from_arc(inner: Arc) -> Self { - Self { - inner, - accept_compression_encodings: Default::default(), - send_compression_encodings: Default::default(), - max_decoding_message_size: None, - max_encoding_message_size: None, - } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> InterceptedService - where - F: tonic::service::Interceptor, - { - InterceptedService::new(Self::new(inner), interceptor) - } - /// Enable decompressing requests with the given encoding. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.accept_compression_encodings.enable(encoding); - self - } - /// Compress responses with the given encoding, if the client supports it. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.send_compression_encodings.enable(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.max_decoding_message_size = Some(limit); - self - } - /// Limits the maximum size of an encoded message. - /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.max_encoding_message_size = Some(limit); - self - } - } - impl tonic::codegen::Service> for EndpointInfoServer - where - T: EndpointInfo, - B: Body + std::marker::Send + 'static, - B::Error: Into + std::marker::Send + 'static, - { - type Response = http::Response; - type Error = std::convert::Infallible; - type Future = BoxFuture; - fn poll_ready( - &mut self, - _cx: &mut Context<'_>, - ) -> Poll> { - Poll::Ready(Ok(())) - } - fn call(&mut self, req: http::Request) -> Self::Future { - match req.uri().path() { - "/sf.substreams.rpc.v2.EndpointInfo/Info" => { - #[allow(non_camel_case_types)] - struct InfoSvc(pub Arc); - impl< - T: EndpointInfo, - > tonic::server::UnaryService - for InfoSvc { - type Response = crate::firehose::InfoResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::info(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = InfoSvc(inner); - let codec = tonic::codec::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, 
req).await; - Ok(res) - }; - Box::pin(fut) - } - _ => { - Box::pin(async move { - let mut response = http::Response::new(empty_body()); - let headers = response.headers_mut(); - headers - .insert( - tonic::Status::GRPC_STATUS, - (tonic::Code::Unimplemented as i32).into(), - ); - headers - .insert( - http::header::CONTENT_TYPE, - tonic::metadata::GRPC_CONTENT_TYPE, - ); - Ok(response) - }) - } - } - } - } - impl Clone for EndpointInfoServer { - fn clone(&self) -> Self { - let inner = self.inner.clone(); - Self { - inner, - accept_compression_encodings: self.accept_compression_encodings, - send_compression_encodings: self.send_compression_encodings, - max_decoding_message_size: self.max_decoding_message_size, - max_encoding_message_size: self.max_encoding_message_size, - } - } - } - /// Generated gRPC service name - pub const SERVICE_NAME: &str = "sf.substreams.rpc.v2.EndpointInfo"; - impl tonic::server::NamedService for EndpointInfoServer { - const NAME: &'static str = SERVICE_NAME; - } -} -/// Generated server implementations. -pub mod stream_server { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - /// Generated trait containing gRPC methods that should be implemented for use with StreamServer. - #[async_trait] - pub trait Stream: std::marker::Send + std::marker::Sync + 'static { - /// Server streaming response type for the Blocks method. - type BlocksStream: tonic::codegen::tokio_stream::Stream< - Item = std::result::Result, - > - + std::marker::Send - + 'static; - async fn blocks( - &self, - request: tonic::Request, - ) -> std::result::Result, tonic::Status>; - } - #[derive(Debug)] - pub struct StreamServer { - inner: Arc, - accept_compression_encodings: EnabledCompressionEncodings, - send_compression_encodings: EnabledCompressionEncodings, - max_decoding_message_size: Option, - max_encoding_message_size: Option, - } - impl StreamServer { - pub fn new(inner: T) -> Self { - Self::from_arc(Arc::new(inner)) - } - pub fn from_arc(inner: Arc) -> Self { - Self { - inner, - accept_compression_encodings: Default::default(), - send_compression_encodings: Default::default(), - max_decoding_message_size: None, - max_encoding_message_size: None, - } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> InterceptedService - where - F: tonic::service::Interceptor, - { - InterceptedService::new(Self::new(inner), interceptor) - } - /// Enable decompressing requests with the given encoding. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.accept_compression_encodings.enable(encoding); - self - } - /// Compress responses with the given encoding, if the client supports it. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.send_compression_encodings.enable(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.max_decoding_message_size = Some(limit); - self - } - /// Limits the maximum size of an encoded message. 
- /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.max_encoding_message_size = Some(limit); - self - } - } - impl tonic::codegen::Service> for StreamServer - where - T: Stream, - B: Body + std::marker::Send + 'static, - B::Error: Into + std::marker::Send + 'static, - { - type Response = http::Response; - type Error = std::convert::Infallible; - type Future = BoxFuture; - fn poll_ready( - &mut self, - _cx: &mut Context<'_>, - ) -> Poll> { - Poll::Ready(Ok(())) - } - fn call(&mut self, req: http::Request) -> Self::Future { - match req.uri().path() { - "/sf.substreams.rpc.v2.Stream/Blocks" => { - #[allow(non_camel_case_types)] - struct BlocksSvc(pub Arc); - impl tonic::server::ServerStreamingService - for BlocksSvc { - type Response = super::Response; - type ResponseStream = T::BlocksStream; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::blocks(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = BlocksSvc(inner); - let codec = tonic::codec::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.server_streaming(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - _ => { - Box::pin(async move { - let mut response = http::Response::new(empty_body()); - let headers = response.headers_mut(); - headers - .insert( - tonic::Status::GRPC_STATUS, - (tonic::Code::Unimplemented as i32).into(), - ); - headers - .insert( - http::header::CONTENT_TYPE, - tonic::metadata::GRPC_CONTENT_TYPE, - ); - Ok(response) - }) - } - } - } - } - impl Clone for StreamServer { - fn clone(&self) -> Self { - let inner = self.inner.clone(); - Self { - inner, - accept_compression_encodings: self.accept_compression_encodings, - send_compression_encodings: self.send_compression_encodings, - max_decoding_message_size: self.max_decoding_message_size, - max_encoding_message_size: self.max_encoding_message_size, - } - } - } - /// Generated gRPC service name - pub const SERVICE_NAME: &str = "sf.substreams.rpc.v2.Stream"; - impl tonic::server::NamedService for StreamServer { - const NAME: &'static str = SERVICE_NAME; - } -} diff --git a/node/Cargo.toml b/node/Cargo.toml index 63723442423..b60128772af 100644 --- a/node/Cargo.toml +++ b/node/Cargo.toml @@ -24,7 +24,6 @@ graph = { path = "../graph" } graph-core = { path = "../core" } graph-chain-ethereum = { path = "../chain/ethereum" } graph-chain-near = { path = "../chain/near" } -graph-chain-substreams = { path = "../chain/substreams" } graph-graphql = { path = "../graphql" } graph-server-http = { path = "../server/http" } graph-server-index-node = { path = "../server/index-node" } diff --git a/node/resources/tests/full_config.toml b/node/resources/tests/full_config.toml index 1f907539194..057e774d93e 100644 --- a/node/resources/tests/full_config.toml +++ 
b/node/resources/tests/full_config.toml @@ -49,7 +49,6 @@ provider = [ { label = "mainnet-0", url = "http://rpc.mainnet.io", features = ["archive", "traces"] }, { label = "mainnet-1", details = { type = "web3call", url = "http://rpc.mainnet.io", features = ["archive", "traces"] }}, { label = "firehose", details = { type = "firehose", url = "http://localhost:9000", features = [] }}, - { label = "substreams", details = { type = "substreams", url = "http://localhost:9000", features = [] }}, ] [chains.ropsten] diff --git a/node/src/chain.rs b/node/src/chain.rs index 543a0cd5cfb..9df653bc9d2 100644 --- a/node/src/chain.rs +++ b/node/src/chain.rs @@ -10,16 +10,13 @@ use ethereum::network::EthereumNetworkAdapter; use ethereum::ProviderEthRpcMetrics; use graph::anyhow::bail; use graph::blockchain::client::ChainClient; -use graph::blockchain::{ - BasicBlockchainBuilder, Blockchain, BlockchainBuilder as _, BlockchainKind, BlockchainMap, - ChainIdentifier, -}; +use graph::blockchain::{BlockchainKind, BlockchainMap, ChainIdentifier}; use graph::cheap_clone::CheapClone; use graph::components::network_provider::ChainName; -use graph::components::store::{BlockStore as _, ChainHeadStore}; +use graph::components::store::BlockStore as _; use graph::endpoint::EndpointMetrics; use graph::env::{EnvVars, ENV_VARS}; -use graph::firehose::{FirehoseEndpoint, SubgraphLimit}; +use graph::firehose::FirehoseEndpoint; use graph::futures03::future::try_join_all; use graph::itertools::Itertools; use graph::log::factory::LoggerFactory; @@ -75,74 +72,6 @@ impl ChainFilter for OneChainFilter { } } -pub fn create_substreams_networks( - logger: Logger, - config: &Config, - endpoint_metrics: Arc, - chain_filter: &dyn ChainFilter, -) -> Vec { - debug!( - logger, - "Creating firehose networks [{} chains, ingestor {}]", - config.chains.chains.len(), - config.chains.ingestor, - ); - - let mut networks_by_kind: BTreeMap<(BlockchainKind, ChainName), Vec>> = - BTreeMap::new(); - - let filtered_chains = config - .chains - .chains - .iter() - .filter(|(name, _)| chain_filter.filter(name)); - - for (name, chain) in filtered_chains { - let name: ChainName = name.as_str().into(); - for provider in &chain.providers { - if let ProviderDetails::Substreams(ref firehose) = provider.details { - info!( - logger, - "Configuring substreams endpoint"; - "provider" => &provider.label, - "network" => &name.to_string(), - ); - - let parsed_networks = networks_by_kind - .entry((chain.protocol, name.clone())) - .or_insert_with(Vec::new); - - for _ in 0..firehose.conn_pool_size { - parsed_networks.push(Arc::new(FirehoseEndpoint::new( - // This label needs to be the original label so that the metrics - // can be deduped. 
- &provider.label, - &firehose.url, - firehose.token.clone(), - firehose.key.clone(), - firehose.filters_enabled(), - firehose.compression_enabled(), - SubgraphLimit::Unlimited, - endpoint_metrics.clone(), - true, - ))); - } - } - } - } - - networks_by_kind - .into_iter() - .map(|((kind, chain_id), endpoints)| { - AdapterConfiguration::Substreams(FirehoseAdapterConfig { - chain_id, - kind, - adapters: endpoints.into(), - }) - }) - .collect() -} - pub fn create_firehose_networks( logger: Logger, config: &Config, @@ -199,7 +128,6 @@ pub fn create_firehose_networks( firehose.compression_enabled(), firehose.limit_for(&config.node), endpoint_metrics.cheap_clone(), - false, ))); } } @@ -400,36 +328,6 @@ pub async fn networks_as_chains( } }; - async fn add_substreams( - networks: &Networks, - config: &Arc, - chain_id: ChainName, - blockchain_map: &mut BlockchainMap, - logger_factory: LoggerFactory, - chain_head_store: Arc, - metrics_registry: Arc, - ) { - let substreams_endpoints = networks.substreams_endpoints(chain_id.clone()); - if substreams_endpoints.len() == 0 { - return; - } - - blockchain_map.insert::( - chain_id.clone(), - Arc::new( - BasicBlockchainBuilder { - logger_factory: logger_factory.clone(), - name: chain_id.clone(), - chain_head_store, - metrics_registry: metrics_registry.clone(), - firehose_endpoints: substreams_endpoints, - } - .build(config) - .await, - ), - ); - } - match kind { BlockchainKind::Ethereum => { // polling interval is set per chain so if set all adapter configuration will have @@ -480,62 +378,17 @@ pub async fn networks_as_chains( blockchain_map .insert::(chain_id.clone(), Arc::new(chain)); - - add_substreams::( - networks, - config, - chain_id.clone(), - blockchain_map, - logger_factory.clone(), - chain_store, - metrics_registry.clone(), - ) - .await; } BlockchainKind::Near => { let firehose_endpoints = networks.firehose_endpoints(chain_id.clone()); - blockchain_map.insert::( - chain_id.clone(), - Arc::new( - BasicBlockchainBuilder { - logger_factory: logger_factory.clone(), - name: chain_id.clone(), - chain_head_store: chain_store.cheap_clone(), - firehose_endpoints, - metrics_registry: metrics_registry.clone(), - } - .build(config) - .await, - ), - ); - - add_substreams::( - networks, - config, - chain_id.clone(), - blockchain_map, + let chain = graph_chain_near::Chain::new( logger_factory.clone(), - chain_store, - metrics_registry.clone(), - ) - .await; - } - BlockchainKind::Substreams => { - let substreams_endpoints = networks.substreams_endpoints(chain_id.clone()); - blockchain_map.insert::( chain_id.clone(), - Arc::new( - BasicBlockchainBuilder { - logger_factory: logger_factory.clone(), - name: chain_id.clone(), - chain_head_store: chain_store, - metrics_registry: metrics_registry.clone(), - firehose_endpoints: substreams_endpoints, - } - .build(config) - .await, - ), + chain_store.cheap_clone(), + firehose_endpoints, + metrics_registry.clone(), ); + blockchain_map.insert::(chain_id.clone(), Arc::new(chain)); } } } diff --git a/node/src/config.rs b/node/src/config.rs index db2a5c203e9..0d781375c43 100644 --- a/node/src/config.rs +++ b/node/src/config.rs @@ -568,30 +568,6 @@ impl Chain { provider.validate()? 
} - if !matches!(self.protocol, BlockchainKind::Substreams) { - let has_only_substreams_providers = self - .providers - .iter() - .all(|provider| matches!(provider.details, ProviderDetails::Substreams(_))); - if has_only_substreams_providers { - bail!( - "{} protocol requires an rpc or firehose endpoint defined", - self.protocol - ); - } - } - - // When using substreams protocol, only substreams endpoints are allowed - if matches!(self.protocol, BlockchainKind::Substreams) { - let has_non_substreams_providers = self - .providers - .iter() - .any(|provider| !matches!(provider.details, ProviderDetails::Substreams(_))); - if has_non_substreams_providers { - bail!("Substreams protocol only supports substreams providers"); - } - } - Ok(()) } } @@ -628,7 +604,6 @@ pub struct Provider { pub enum ProviderDetails { Firehose(FirehoseProvider), Web3(Web3Provider), - Substreams(FirehoseProvider), Web3Call(Web3Provider), } @@ -747,8 +722,7 @@ impl Provider { validate_name(&self.label).context("illegal provider name")?; match self.details { - ProviderDetails::Firehose(ref mut firehose) - | ProviderDetails::Substreams(ref mut firehose) => { + ProviderDetails::Firehose(ref mut firehose) => { firehose.url = shellexpand::env(&firehose.url)?.into_owned(); // A Firehose url must be a valid Uri since gRPC library we use (Tonic) @@ -904,10 +878,7 @@ impl<'de> Deserialize<'de> for Provider { } match v { - ProviderDetails::Firehose(ref mut firehose) - | ProviderDetails::Substreams(ref mut firehose) => { - firehose.rules = nodes - } + ProviderDetails::Firehose(ref mut firehose) => firehose.rules = nodes, _ => {} } @@ -1393,47 +1364,6 @@ mod tests { ); } - #[test] - fn fails_if_non_substreams_provider_for_substreams_protocol() { - let mut actual = toml::from_str::( - r#" - ingestor = "block_ingestor_node" - [mainnet] - shard = "primary" - protocol = "substreams" - provider = [ - { label = "firehose", details = { type = "firehose", url = "http://127.0.0.1:8888", token = "TOKEN", features = ["filters"] }}, - ] - "#, - ) - .unwrap(); - let err = actual.validate().unwrap_err().to_string(); - - assert!(err.contains("only supports substreams providers"), "{err}"); - } - - #[test] - fn fails_if_only_substreams_provider_for_non_substreams_protocol() { - let mut actual = toml::from_str::( - r#" - ingestor = "block_ingestor_node" - [mainnet] - shard = "primary" - protocol = "ethereum" - provider = [ - { label = "firehose", details = { type = "substreams", url = "http://127.0.0.1:8888", token = "TOKEN", features = ["filters"] }}, - ] - "#, - ) - .unwrap(); - let err = actual.validate().unwrap_err().to_string(); - - assert!( - err.contains("ethereum protocol requires an rpc or firehose endpoint defined"), - "{err}" - ); - } - #[test] fn it_works_on_new_web3_provider_from_toml() { let actual = toml::from_str( @@ -1532,55 +1462,16 @@ mod tests { } #[test] - fn it_works_on_substreams_provider_from_toml() { - let actual = toml::from_str( + fn it_fails_for_substreams() { + let actual: Result = toml::from_str( r#" label = "bananas" details = { type = "substreams", url = "http://localhost:9000", features = [] } "#, - ) - .unwrap(); - - assert_eq!( - Provider { - label: "bananas".to_owned(), - details: ProviderDetails::Substreams(FirehoseProvider { - url: "http://localhost:9000".to_owned(), - token: None, - key: None, - features: BTreeSet::new(), - conn_pool_size: 20, - rules: vec![], - }), - }, - actual - ); - } - - #[test] - fn it_works_on_substreams_provider_from_toml_with_api_key() { - let actual = toml::from_str( - r#" - label = 
"authed" - details = { type = "substreams", url = "http://localhost:9000", key = "KEY", features = [] } - "#, - ) - .unwrap(); - - assert_eq!( - Provider { - label: "authed".to_owned(), - details: ProviderDetails::Substreams(FirehoseProvider { - url: "http://localhost:9000".to_owned(), - token: None, - key: Some("KEY".to_owned()), - features: BTreeSet::new(), - conn_pool_size: 20, - rules: vec![], - }), - }, - actual ); + assert!(actual.is_err()); + let err = actual.unwrap_err().to_string(); + assert!(err.contains("unknown variant `substreams`")); } #[test] @@ -1649,123 +1540,6 @@ mod tests { assert! { actual.validate().is_ok()}; } - #[test] - fn it_errors_on_firehose_provider_with_high_limit() { - let mut actual = toml::from_str( - r#" - label = "substreams" - details = { type = "substreams", url = "http://localhost:9000" } - match = [ - { name = "some_node_.*", limit = 101 }, - { name = "other_node_.*", limit = 0 } ] - "#, - ) - .unwrap(); - - assert_eq!( - Provider { - label: "substreams".to_owned(), - details: ProviderDetails::Substreams(FirehoseProvider { - url: "http://localhost:9000".to_owned(), - token: None, - key: None, - features: BTreeSet::new(), - conn_pool_size: 20, - rules: vec![ - Web3Rule { - name: Regex::new("some_node_.*").unwrap(), - limit: 101, - }, - Web3Rule { - name: Regex::new("other_node_.*").unwrap(), - limit: 0, - } - ], - }), - }, - actual - ); - assert! { actual.validate().is_err()}; - } - - #[test] - fn it_works_on_new_substreams_provider_with_doc_example_match() { - let mut actual = toml::from_str( - r#" - label = "substreams" - details = { type = "substreams", url = "http://localhost:9000" } - match = [ - { name = "some_node_.*", limit = 10 }, - { name = "other_node_.*", limit = 0 } ] - "#, - ) - .unwrap(); - - assert_eq!( - Provider { - label: "substreams".to_owned(), - details: ProviderDetails::Substreams(FirehoseProvider { - url: "http://localhost:9000".to_owned(), - token: None, - key: None, - features: BTreeSet::new(), - conn_pool_size: 20, - rules: vec![ - Web3Rule { - name: Regex::new("some_node_.*").unwrap(), - limit: 10, - }, - Web3Rule { - name: Regex::new("other_node_.*").unwrap(), - limit: 0, - } - ], - }), - }, - actual - ); - assert! { actual.validate().is_ok()}; - } - - #[test] - fn it_errors_on_substreams_provider_with_high_limit() { - let mut actual = toml::from_str( - r#" - label = "substreams" - details = { type = "substreams", url = "http://localhost:9000" } - match = [ - { name = "some_node_.*", limit = 101 }, - { name = "other_node_.*", limit = 0 } ] - "#, - ) - .unwrap(); - - assert_eq!( - Provider { - label: "substreams".to_owned(), - details: ProviderDetails::Substreams(FirehoseProvider { - url: "http://localhost:9000".to_owned(), - token: None, - key: None, - features: BTreeSet::new(), - conn_pool_size: 20, - rules: vec![ - Web3Rule { - name: Regex::new("some_node_.*").unwrap(), - limit: 101, - }, - Web3Rule { - name: Regex::new("other_node_.*").unwrap(), - limit: 0, - } - ], - }), - }, - actual - ); - assert! 
{ actual.validate().is_err()}; - } - #[test] fn it_works_on_new_firehose_provider_from_toml_unsupported_features() { let actual = toml::from_str::( diff --git a/node/src/manager/commands/provider_checks.rs b/node/src/manager/commands/provider_checks.rs index a2541be6c7a..12f2e98e00b 100644 --- a/node/src/manager/commands/provider_checks.rs +++ b/node/src/manager/commands/provider_checks.rs @@ -67,27 +67,6 @@ pub async fn execute(logger: &Logger, networks: &Networks, store: BlockStore, ti } } - for adapter in networks - .substreams_provider_manager - .providers_unchecked(chain_name) - .unique_by(|x| x.provider_name()) - { - let validator = chain_id_validator(Box::new(store.cheap_clone())); - match tokio::time::timeout( - timeout, - run_checks(logger, chain_name, adapter, validator.clone()), - ) - .await - { - Ok(result) => { - errors.extend(result); - } - Err(_) => { - errors.push("Timeout".to_owned()); - } - } - } - if errors.is_empty() { println!("Chain: {chain_name}; Status: OK"); continue; diff --git a/node/src/network_setup.rs b/node/src/network_setup.rs index 63cfe8097b4..c35b5fe4831 100644 --- a/node/src/network_setup.rs +++ b/node/src/network_setup.rs @@ -30,8 +30,8 @@ use graph_store_postgres::{BlockStore, ChainHeadUpdateListener}; use std::{any::Any, cmp::Ordering, sync::Arc, time::Duration}; use crate::chain::{ - create_ethereum_networks, create_firehose_networks, create_substreams_networks, - networks_as_chains, AnyChainFilter, ChainFilter, OneChainFilter, + create_ethereum_networks, create_firehose_networks, networks_as_chains, AnyChainFilter, + ChainFilter, OneChainFilter, }; #[derive(Debug, Clone)] @@ -55,21 +55,19 @@ pub struct FirehoseAdapterConfig { pub enum AdapterConfiguration { Rpc(EthAdapterConfig), Firehose(FirehoseAdapterConfig), - Substreams(FirehoseAdapterConfig), } impl AdapterConfiguration { pub fn blockchain_kind(&self) -> &BlockchainKind { match self { AdapterConfiguration::Rpc(_) => &BlockchainKind::Ethereum, - AdapterConfiguration::Firehose(fh) | AdapterConfiguration::Substreams(fh) => &fh.kind, + AdapterConfiguration::Firehose(fh) => &fh.kind, } } pub fn chain_id(&self) -> &ChainName { match self { AdapterConfiguration::Rpc(EthAdapterConfig { chain_id, .. }) - | AdapterConfiguration::Firehose(FirehoseAdapterConfig { chain_id, .. }) - | AdapterConfiguration::Substreams(FirehoseAdapterConfig { chain_id, .. }) => chain_id, + | AdapterConfiguration::Firehose(FirehoseAdapterConfig { chain_id, .. 
}) => chain_id, } } @@ -90,24 +88,12 @@ impl AdapterConfiguration { pub fn is_firehose(&self) -> bool { self.as_firehose().is_none() } - - pub fn as_substreams(&self) -> Option<&FirehoseAdapterConfig> { - match self { - AdapterConfiguration::Substreams(fh) => Some(fh), - _ => None, - } - } - - pub fn is_substreams(&self) -> bool { - self.as_substreams().is_none() - } } pub struct Networks { pub adapters: Vec, pub rpc_provider_manager: ProviderManager, pub firehose_provider_manager: ProviderManager>, - pub substreams_provider_manager: ProviderManager>, } impl Networks { @@ -125,11 +111,6 @@ impl Networks { vec![].into_iter(), ProviderCheckStrategy::MarkAsValid, ), - substreams_provider_manager: ProviderManager::new( - Logger::root(Discard, o!()), - vec![].into_iter(), - ProviderCheckStrategy::MarkAsValid, - ), } } @@ -172,14 +153,6 @@ impl Networks { "firehose", ) }) - .or_else(|_| { - get_identifier( - self.substreams_provider_manager.clone(), - logger, - chain_id, - "substreams", - ) - }) .await } @@ -209,17 +182,7 @@ impl Networks { endpoint_metrics.cheap_clone(), chain_filter, ); - let substreams = create_substreams_networks( - logger.cheap_clone(), - &config, - endpoint_metrics, - chain_filter, - ); - let adapters: Vec<_> = eth - .into_iter() - .chain(firehose.into_iter()) - .chain(substreams.into_iter()) - .collect(); + let adapters: Vec<_> = eth.into_iter().chain(firehose.into_iter()).collect(); Ok(Networks::new(&logger, adapters, provider_checks)) } @@ -298,19 +261,6 @@ impl Networks { ) .collect_vec(); - let substreams_adapters = adapters - .iter() - .flat_map(|a| a.as_substreams()) - .cloned() - .map( - |FirehoseAdapterConfig { - chain_id, - kind: _, - adapters, - }| { (chain_id, adapters) }, - ) - .collect_vec(); - let s = Self { adapters: adapters2, rpc_provider_manager: ProviderManager::new( @@ -325,13 +275,6 @@ impl Networks { .map(|(chain_id, endpoints)| (chain_id, endpoints)), ProviderCheckStrategy::RequireAll(provider_checks), ), - substreams_provider_manager: ProviderManager::new( - logger.clone(), - substreams_adapters - .into_iter() - .map(|(chain_id, endpoints)| (chain_id, endpoints)), - ProviderCheckStrategy::RequireAll(provider_checks), - ), }; s @@ -379,21 +322,9 @@ impl Networks { BlockchainKind::Near => { block_ingestor::(logger, id, chain, &mut res).await? } - BlockchainKind::Substreams => {} } } - // substreams networks that also have other types of chain(rpc or firehose), will have - // block ingestors already running. - let visited: Vec<_> = res.iter().map(|b| b.network_name()).collect(); - - for ((_, id), chain) in blockchain_map - .iter() - .filter(|((kind, id), _)| BlockchainKind::Substreams.eq(&kind) && !visited.contains(id)) - { - block_ingestor::(logger, id, chain, &mut res).await? 
- } - Ok(res) } @@ -427,10 +358,6 @@ impl Networks { FirehoseEndpoints::new(chain_id, self.firehose_provider_manager.clone()) } - pub fn substreams_endpoints(&self, chain_id: ChainName) -> FirehoseEndpoints { - FirehoseEndpoints::new(chain_id, self.substreams_provider_manager.clone()) - } - pub fn ethereum_rpcs(&self, chain_id: ChainName) -> EthereumNetworkAdapters { let eth_adapters = self .adapters diff --git a/runtime/wasm/src/host.rs b/runtime/wasm/src/host.rs index aa079381a94..1f21166c359 100644 --- a/runtime/wasm/src/host.rs +++ b/runtime/wasm/src/host.rs @@ -5,7 +5,7 @@ use async_trait::async_trait; use graph::futures01::sync::mpsc::Sender; use graph::futures03::channel::oneshot::channel; -use graph::blockchain::{BlockTime, Blockchain, HostFn, RuntimeAdapter}; +use graph::blockchain::{Blockchain, HostFn, RuntimeAdapter}; use graph::components::store::{EnsLookup, SubgraphFork}; use graph::components::subgraph::{MappingError, SharedProofOfIndexing}; use graph::data_source::{ @@ -221,74 +221,6 @@ where // Discard the gas value result.map(|(block_state, _)| block_state) } - - async fn send_wasm_block_request( - &self, - logger: &Logger, - state: BlockState, - block_ptr: BlockPtr, - timestamp: BlockTime, - block_data: Box<[u8]>, - handler: String, - proof_of_indexing: SharedProofOfIndexing, - debug_fork: &Option>, - instrument: bool, - ) -> Result { - trace!( - logger, "Start processing wasm block"; - "block_ptr" => &block_ptr, - "handler" => &handler, - "data_source" => &self.data_source.name(), - ); - - let (result_sender, result_receiver) = channel(); - let start_time = Instant::now(); - let metrics = self.metrics.clone(); - - self.mapping_request_sender - .clone() - .send(WasmRequest::new_block( - MappingContext { - logger: logger.cheap_clone(), - state, - host_exports: self.host_exports.cheap_clone(), - block_ptr: block_ptr.clone(), - timestamp, - proof_of_indexing, - host_fns: self.host_fns.cheap_clone(), - debug_fork: debug_fork.cheap_clone(), - mapping_logger: Logger::new(&logger, o!("component" => "UserBlockMapping")), - instrument, - }, - handler.clone(), - block_data, - result_sender, - )) - .compat() - .await - .context("Mapping terminated before passing in wasm block")?; - - let result = result_receiver - .await - .context("Mapping terminated before handling block")?; - - let elapsed = start_time.elapsed(); - metrics.observe_handler_execution_time(elapsed.as_secs_f64(), &handler); - - // If there is an error, "gas_used" is incorrectly reported as 0. 
- let gas_used = result.as_ref().map(|(_, gas)| gas).unwrap_or(&Gas::ZERO); - info!( - logger, "Done processing wasm block"; - "block_ptr" => &block_ptr, - "total_ms" => elapsed.as_millis(), - "handler" => handler, - "data_source" => &self.data_source.name(), - "gas_used" => gas_used.to_string(), - ); - - // Discard the gas value - result.map(|(block_state, _)| block_state) - } } #[async_trait] @@ -306,32 +238,6 @@ impl RuntimeHostTrait for RuntimeHost { self.data_source.match_and_decode(trigger, block, logger) } - async fn process_block( - &self, - logger: &Logger, - block_ptr: BlockPtr, - block_time: BlockTime, - block_data: Box<[u8]>, - handler: String, - state: BlockState, - proof_of_indexing: SharedProofOfIndexing, - debug_fork: &Option>, - instrument: bool, - ) -> Result { - self.send_wasm_block_request( - logger, - state, - block_ptr, - block_time, - block_data, - handler, - proof_of_indexing, - debug_fork, - instrument, - ) - .await - } - async fn process_mapping_trigger( &self, logger: &Logger, diff --git a/runtime/wasm/src/mapping.rs b/runtime/wasm/src/mapping.rs index 0e06c125c1a..75c7cd64042 100644 --- a/runtime/wasm/src/mapping.rs +++ b/runtime/wasm/src/mapping.rs @@ -81,10 +81,6 @@ where handle_trigger(&logger, module, trigger, host_metrics.cheap_clone()) .await } - WasmRequestInner::BlockRequest(BlockRequest { - block_data, - handler, - }) => module.handle_block(&logger, &handler, block_data).await, }, Err(e) => Err(MappingError::Unknown(e)), } @@ -181,32 +177,10 @@ impl WasmRequest { result_sender, } } - - pub(crate) fn new_block( - ctx: MappingContext, - handler: String, - block_data: Box<[u8]>, - result_sender: Sender>, - ) -> Self { - WasmRequest { - ctx, - inner: WasmRequestInner::BlockRequest(BlockRequest { - handler, - block_data, - }), - result_sender, - } - } } pub enum WasmRequestInner { TriggerRequest(TriggerWithHandler>), - BlockRequest(BlockRequest), -} - -pub struct BlockRequest { - pub(crate) handler: String, - pub(crate) block_data: Box<[u8]>, } pub struct MappingContext { diff --git a/runtime/wasm/src/module/instance.rs b/runtime/wasm/src/module/instance.rs index 21560bb4fe5..0b6617bcb24 100644 --- a/runtime/wasm/src/module/instance.rs +++ b/runtime/wasm/src/module/instance.rs @@ -17,7 +17,7 @@ use graph::prelude::*; use graph::runtime::{ asc_new, gas::{Gas, GasCounter, SaturatingInto}, - HostExportError, ToAscObj, + HostExportError, }; use graph::{components::subgraph::MappingError, runtime::AscPtr}; @@ -111,22 +111,6 @@ impl WasmInstance { Ok(wasm_ctx.take_state()) } - pub(crate) async fn handle_block( - mut self, - _logger: &Logger, - handler_name: &str, - block_data: Box<[u8]>, - ) -> Result<(BlockState, Gas), MappingError> { - let gas = self.gas.clone(); - let mut ctx = self.instance_ctx(); - let obj = block_data.to_vec().to_asc_obj(&mut ctx, &gas).await?; - - let obj = AscPtr::alloc_obj(obj, &mut ctx, &gas).await?; - - self.invoke_handler(handler_name, obj, Arc::new(o!()), None) - .await - } - pub(crate) async fn handle_trigger( mut self, trigger: TriggerWithHandler>, diff --git a/server/index-node/Cargo.toml b/server/index-node/Cargo.toml index 103cba19f96..9672f657e4a 100644 --- a/server/index-node/Cargo.toml +++ b/server/index-node/Cargo.toml @@ -10,5 +10,4 @@ graph = { path = "../../graph" } graph-graphql = { path = "../../graphql" } graph-chain-ethereum = { path = "../../chain/ethereum" } graph-chain-near = { path = "../../chain/near" } -graph-chain-substreams = { path = "../../chain/substreams" } git-testament = "0.2.6" diff --git 
a/server/index-node/src/resolver.rs b/server/index-node/src/resolver.rs index 76dd25414d0..5714a128c77 100644 --- a/server/index-node/src/resolver.rs +++ b/server/index-node/src/resolver.rs @@ -562,24 +562,6 @@ where ) .await? } - BlockchainKind::Substreams => { - let unvalidated_subgraph_manifest = - UnvalidatedSubgraphManifest::::resolve( - deployment_hash.clone(), - raw_yaml, - &self.link_resolver, - self.amp_client.cheap_clone(), - &self.logger, - max_spec_version, - ) - .await?; - - Self::validate_and_extract_features( - &self.store.subgraph_store(), - unvalidated_subgraph_manifest, - ) - .await? - } }; Ok(result) @@ -694,7 +676,7 @@ where // type. match BlockchainKind::Ethereum { // Note: we don't actually care about substreams here. - BlockchainKind::Substreams | BlockchainKind::Ethereum | BlockchainKind::Near => (), + BlockchainKind::Ethereum | BlockchainKind::Near => (), } // The given network does not exist. diff --git a/store/test-store/src/store.rs b/store/test-store/src/store.rs index a671e770a6f..5719caaa9ec 100644 --- a/store/test-store/src/store.rs +++ b/store/test-store/src/store.rs @@ -123,6 +123,7 @@ where } /// Run a test with a connection into the primary database, not a full store +#[allow(clippy::await_holding_lock)] pub async fn run_test_with_conn(test: F) where F: AsyncFnOnce(&mut AsyncPgConnection), diff --git a/substreams/substreams-head-tracker/Cargo.lock b/substreams/substreams-head-tracker/Cargo.lock deleted file mode 100755 index 92ad0a04eef..00000000000 --- a/substreams/substreams-head-tracker/Cargo.lock +++ /dev/null @@ -1,583 +0,0 @@ -# This file is automatically @generated by Cargo. -# It is not intended for manual editing. -version = 3 - -[[package]] -name = "aho-corasick" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6748e8def348ed4d14996fa801f4122cd763fff530258cdc03f64b25f89d3a5a" -dependencies = [ - "memchr", -] - -[[package]] -name = "anyhow" -version = "1.0.75" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4668cab20f66d8d020e1fbc0ebe47217433c1b6c8f2040faf858554e394ace6" - -[[package]] -name = "autocfg" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" - -[[package]] -name = "base64" -version = "0.13.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" - -[[package]] -name = "bigdecimal" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6773ddc0eafc0e509fb60e48dff7f450f8e674a0686ae8605e8d9901bd5eefa" -dependencies = [ - "num-bigint", - "num-integer", - "num-traits", -] - -[[package]] -name = "bitflags" -version = "1.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" - -[[package]] -name = "bitflags" -version = "2.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4682ae6287fcf752ecaabbfcc7b6f9b72aa33933dc23a554d853aea8eea8635" - -[[package]] -name = "bytes" -version = "1.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89b2fd2a0dcf38d7971e2194b6b6eebab45ae01067456a7fd93d5547a61b70be" - -[[package]] -name = "cc" -version = "1.0.83" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" -dependencies = [ - "libc", -] - -[[package]] -name = "cfg-if" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" - -[[package]] -name = "either" -version = "1.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07" - -[[package]] -name = "equivalent" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" - -[[package]] -name = "errno" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b30f669a7961ef1631673d2766cc92f52d64f7ef354d4fe0ddfd30ed52f0f4f" -dependencies = [ - "errno-dragonfly", - "libc", - "windows-sys", -] - -[[package]] -name = "errno-dragonfly" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf" -dependencies = [ - "cc", - "libc", -] - -[[package]] -name = "fastrand" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6999dc1837253364c2ebb0704ba97994bd874e8f195d665c50b7548f6ea92764" - -[[package]] -name = "fixedbitset" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" - -[[package]] -name = "hashbrown" -version = "0.14.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c6201b9ff9fd90a5a3bac2e56a830d0caa509576f0e503818ee82c181b3437a" - -[[package]] -name = "heck" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" - -[[package]] -name = "hex" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" - -[[package]] -name = "hex-literal" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ebdb29d2ea9ed0083cd8cece49bbd968021bd99b0849edb4a9a7ee0fdf6a4e0" - -[[package]] -name = "indexmap" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5477fe2230a79769d8dc68e0eabf5437907c0457a5614a9e8dddb67f65eb65d" -dependencies = [ - "equivalent", - "hashbrown", -] - -[[package]] -name = "itertools" -version = "0.10.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" -dependencies = [ - "either", -] - -[[package]] -name = "lazy_static" -version = "1.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" - -[[package]] -name = "libc" -version = "0.2.147" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4668fb0ea861c1df094127ac5f1da3409a82116a4ba74fca2e58ef927159bb3" - -[[package]] -name = "linux-raw-sys" -version = "0.4.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57bcfdad1b858c2db7c38303a6d2ad4dfaf5eb53dfeb0910128b2c26d6158503" - -[[package]] -name = "log" -version = "0.4.20" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" - -[[package]] -name = "memchr" -version = "2.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" - -[[package]] -name = "multimap" -version = "0.8.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a" - -[[package]] -name = "num-bigint" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "608e7659b5c3d7cba262d894801b9ec9d00de989e8a82bd4bef91d08da45cdc0" -dependencies = [ - "autocfg", - "num-integer", - "num-traits", -] - -[[package]] -name = "num-integer" -version = "0.1.45" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9" -dependencies = [ - "autocfg", - "num-traits", -] - -[[package]] -name = "num-traits" -version = "0.2.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f30b0abd723be7e2ffca1272140fac1a2f084c77ec3e123c192b66af1ee9e6c2" -dependencies = [ - "autocfg", -] - -[[package]] -name = "once_cell" -version = "1.18.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" - -[[package]] -name = "pad" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2ad9b889f1b12e0b9ee24db044b5129150d5eada288edc800f789928dc8c0e3" -dependencies = [ - "unicode-width", -] - -[[package]] -name = "petgraph" -version = "0.6.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1d3afd2628e69da2be385eb6f2fd57c8ac7977ceeff6dc166ff1657b0e386a9" -dependencies = [ - "fixedbitset", - "indexmap", -] - -[[package]] -name = "prettyplease" -version = "0.1.25" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c8646e95016a7a6c4adea95bafa8a16baab64b583356217f2c85db4a39d9a86" -dependencies = [ - "proc-macro2", - "syn 1.0.109", -] - -[[package]] -name = "proc-macro2" -version = "1.0.66" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18fb31db3f9bddb2ea821cde30a9f70117e3f119938b5ee630b7403aa6e2ead9" -dependencies = [ - "unicode-ident", -] - -[[package]] -name = "prost" -version = "0.11.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b82eaa1d779e9a4bc1c3217db8ffbeabaae1dca241bf70183242128d48681cd" -dependencies = [ - "bytes", - "prost-derive", -] - -[[package]] -name = "prost-build" -version = "0.11.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "119533552c9a7ffacc21e099c24a0ac8bb19c2a2a3f363de84cd9b844feab270" -dependencies = [ - "bytes", - "heck", - "itertools", - "lazy_static", - "log", - "multimap", - "petgraph", - "prettyplease", - "prost", - "prost-types", - "regex", - "syn 1.0.109", - "tempfile", - "which", -] - -[[package]] -name = "prost-derive" -version = "0.11.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5d2d8d10f3c6ded6da8b05b5fb3b8a5082514344d56c9f871412d29b4e075b4" -dependencies = [ - "anyhow", - "itertools", - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "prost-types" -version = "0.11.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"213622a1460818959ac1181aaeb2dc9c7f63df720db7d788b3e24eacd1983e13" -dependencies = [ - "prost", -] - -[[package]] -name = "quote" -version = "1.0.33" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae" -dependencies = [ - "proc-macro2", -] - -[[package]] -name = "redox_syscall" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "567664f262709473930a4bf9e51bf2ebf3348f2e748ccc50dea20646858f8f29" -dependencies = [ - "bitflags 1.3.2", -] - -[[package]] -name = "regex" -version = "1.9.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12de2eff854e5fa4b1295edd650e227e9d8fb0c9e90b12e7f36d6a6811791a29" -dependencies = [ - "aho-corasick", - "memchr", - "regex-automata", - "regex-syntax", -] - -[[package]] -name = "regex-automata" -version = "0.3.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49530408a136e16e5b486e883fbb6ba058e8e4e8ae6621a77b048b314336e629" -dependencies = [ - "aho-corasick", - "memchr", - "regex-syntax", -] - -[[package]] -name = "regex-syntax" -version = "0.7.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbb5fb1acd8a1a18b3dd5be62d25485eb770e05afb408a9627d14d451bae12da" - -[[package]] -name = "rustix" -version = "0.38.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9bfe0f2582b4931a45d1fa608f8a8722e8b3c7ac54dd6d5f3b3212791fedef49" -dependencies = [ - "bitflags 2.4.0", - "errno", - "libc", - "linux-raw-sys", - "windows-sys", -] - -[[package]] -name = "substreams" -version = "0.5.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af972e374502cdfc9998132f5343848d1c58f27a295dc061a89804371f408a46" -dependencies = [ - "anyhow", - "bigdecimal", - "hex", - "hex-literal", - "num-bigint", - "num-traits", - "pad", - "prost", - "prost-build", - "prost-types", - "substreams-macro", - "thiserror", -] - -[[package]] -name = "substreams-entity-change" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d423d0c12a9284a3d6d4ec288dbc9bfec3d55f9056098ba91a6dcfa64fb3889e" -dependencies = [ - "base64", - "prost", - "prost-types", - "substreams", -] - -[[package]] -name = "substreams-head-tracker" -version = "1.0.0" -dependencies = [ - "prost", - "substreams", - "substreams-entity-change", -] - -[[package]] -name = "substreams-macro" -version = "0.5.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6521ccd011a4c3f52cd3c31fc7400733e4feba2094e0e0e6354adca25b2b3f37" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", - "thiserror", -] - -[[package]] -name = "syn" -version = "1.0.109" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" -dependencies = [ - "proc-macro2", - "quote", - "unicode-ident", -] - -[[package]] -name = "syn" -version = "2.0.29" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c324c494eba9d92503e6f1ef2e6df781e78f6a7705a0202d9801b198807d518a" -dependencies = [ - "proc-macro2", - "quote", - "unicode-ident", -] - -[[package]] -name = "tempfile" -version = "3.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb94d2f3cc536af71caac6b6fcebf65860b347e7ce0cc9ebe8f70d3e521054ef" -dependencies = [ - "cfg-if", - "fastrand", - "redox_syscall", - "rustix", - 
"windows-sys", -] - -[[package]] -name = "thiserror" -version = "1.0.47" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97a802ec30afc17eee47b2855fc72e0c4cd62be9b4efe6591edde0ec5bd68d8f" -dependencies = [ - "thiserror-impl", -] - -[[package]] -name = "thiserror-impl" -version = "1.0.47" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6bb623b56e39ab7dcd4b1b98bb6c8f8d907ed255b18de254088016b27a8ee19b" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.29", -] - -[[package]] -name = "unicode-ident" -version = "1.0.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "301abaae475aa91687eb82514b328ab47a211a533026cb25fc3e519b86adfc3c" - -[[package]] -name = "unicode-width" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b" - -[[package]] -name = "which" -version = "4.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2441c784c52b289a054b7201fc93253e288f094e2f4be9058343127c4226a269" -dependencies = [ - "either", - "libc", - "once_cell", -] - -[[package]] -name = "windows-sys" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" -dependencies = [ - "windows-targets", -] - -[[package]] -name = "windows-targets" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" -dependencies = [ - "windows_aarch64_gnullvm", - "windows_aarch64_msvc", - "windows_i686_gnu", - "windows_i686_msvc", - "windows_x86_64_gnu", - "windows_x86_64_gnullvm", - "windows_x86_64_msvc", -] - -[[package]] -name = "windows_aarch64_gnullvm" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" - -[[package]] -name = "windows_aarch64_msvc" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" - -[[package]] -name = "windows_i686_gnu" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" - -[[package]] -name = "windows_i686_msvc" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" - -[[package]] -name = "windows_x86_64_gnu" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" - -[[package]] -name = "windows_x86_64_gnullvm" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" - -[[package]] -name = "windows_x86_64_msvc" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" diff --git a/substreams/substreams-head-tracker/Cargo.toml b/substreams/substreams-head-tracker/Cargo.toml deleted file mode 100755 index 2548160f736..00000000000 --- a/substreams/substreams-head-tracker/Cargo.toml +++ /dev/null @@ -1,8 +0,0 @@ 
-[package] -name = "substreams-head-tracker" -version.workspace = true -edition.workspace = true - -[lib] -crate-type = ["cdylib"] - diff --git a/substreams/substreams-head-tracker/Makefile b/substreams/substreams-head-tracker/Makefile deleted file mode 100755 index 9ef9e5c3f70..00000000000 --- a/substreams/substreams-head-tracker/Makefile +++ /dev/null @@ -1,15 +0,0 @@ -ENDPOINT ?= mainnet.eth.streamingfast.io:443 -START_BLOCK ?= 16000000 -STOP_BLOCK ?= +100 - -.PHONY: build -build: - cargo build --target wasm32-unknown-unknown --release - -.PHONY: run -run: build - substreams run -e $(ENDPOINT) substreams.yaml map_blocks -s $(START_BLOCK) -t $(STOP_BLOCK) - -.PHONY: pack -pack: build - substreams pack substreams.yaml diff --git a/substreams/substreams-head-tracker/rust-toolchain.toml b/substreams/substreams-head-tracker/rust-toolchain.toml deleted file mode 100755 index a09cf93404f..00000000000 --- a/substreams/substreams-head-tracker/rust-toolchain.toml +++ /dev/null @@ -1,2 +0,0 @@ -[toolchain] -targets = [ "wasm32-unknown-unknown" ] \ No newline at end of file diff --git a/substreams/substreams-head-tracker/src/lib.rs b/substreams/substreams-head-tracker/src/lib.rs deleted file mode 100644 index ee880963011..00000000000 --- a/substreams/substreams-head-tracker/src/lib.rs +++ /dev/null @@ -1,19 +0,0 @@ -#![cfg(target_arch = "wasm32")] - -#[no_mangle] -pub extern "C" fn map_blocks(_params_ptr: *mut u8, _params_len: usize) {} - -#[no_mangle] -pub fn alloc(size: usize) -> *mut u8 { - let mut buf = Vec::with_capacity(size); - let ptr = buf.as_mut_ptr(); - - // Runtime is responsible of calling dealloc when no longer needed - std::mem::forget(buf); - ptr -} - -#[no_mangle] -pub unsafe fn dealloc(ptr: *mut u8, size: usize) { - std::mem::drop(Vec::from_raw_parts(ptr, size, size)) -} diff --git a/substreams/substreams-head-tracker/substreams-head-tracker-v1.0.0.spkg b/substreams/substreams-head-tracker/substreams-head-tracker-v1.0.0.spkg deleted file mode 100644 index 2e44fdf53c6..00000000000 Binary files a/substreams/substreams-head-tracker/substreams-head-tracker-v1.0.0.spkg and /dev/null differ diff --git a/substreams/substreams-head-tracker/substreams.yaml b/substreams/substreams-head-tracker/substreams.yaml deleted file mode 100755 index 07d38afeeca..00000000000 --- a/substreams/substreams-head-tracker/substreams.yaml +++ /dev/null @@ -1,17 +0,0 @@ -specVersion: v0.1.0 -package: - name: substreams_head_tracker - version: v1.0.0 - -binaries: - default: - type: wasm/rust-v1 - file: ./target/wasm32-unknown-unknown/release/substreams.wasm - -modules: - - name: map_blocks - kind: map - inputs: - - params: string - output: - type: proto:sf.substreams.entity.v1.EntityChanges diff --git a/substreams/substreams-trigger-filter/Cargo.lock b/substreams/substreams-trigger-filter/Cargo.lock deleted file mode 100755 index 5a22905c7f5..00000000000 --- a/substreams/substreams-trigger-filter/Cargo.lock +++ /dev/null @@ -1,498 +0,0 @@ -# This file is automatically @generated by Cargo. -# It is not intended for manual editing. 
-version = 3 - -[[package]] -name = "anyhow" -version = "1.0.66" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "216261ddc8289130e551ddcd5ce8a064710c0d064a4d2895c67151c92b5443f6" - -[[package]] -name = "autocfg" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" - -[[package]] -name = "base64" -version = "0.13.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" - -[[package]] -name = "bigdecimal" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6aaf33151a6429fe9211d1b276eafdf70cdff28b071e76c0b0e1503221ea3744" -dependencies = [ - "num-bigint", - "num-integer", - "num-traits", -] - -[[package]] -name = "bitflags" -version = "1.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" - -[[package]] -name = "bs58" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "771fe0050b883fcc3ea2359b1a96bcfbc090b7116eae7c3c512c7a083fdf23d3" - -[[package]] -name = "bytes" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfb24e866b15a1af2a1b663f10c6b6b8f397a84aadb828f12e5b289ec23a3a3c" - -[[package]] -name = "cfg-if" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" - -[[package]] -name = "either" -version = "1.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90e5c1c8368803113bf0c9584fc495a58b86dc8a29edbf8fe877d21d9507e797" - -[[package]] -name = "fastrand" -version = "1.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7a407cfaa3385c4ae6b23e84623d48c2798d06e3e6a1878f7f59f17b3f86499" -dependencies = [ - "instant", -] - -[[package]] -name = "fixedbitset" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" - -[[package]] -name = "hashbrown" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" - -[[package]] -name = "heck" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2540771e65fc8cb83cd6e8a237f70c319bd5c29f78ed1084ba5d50eeac86f7f9" - -[[package]] -name = "hex" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" - -[[package]] -name = "hex-literal" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ebdb29d2ea9ed0083cd8cece49bbd968021bd99b0849edb4a9a7ee0fdf6a4e0" - -[[package]] -name = "indexmap" -version = "1.9.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1885e79c1fc4b10f0e172c475f458b7f7b93061064d98c3293e98c5ba0c8b399" -dependencies = [ - "autocfg", - "hashbrown", -] - -[[package]] -name = "instant" -version = "0.1.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" -dependencies = [ - "cfg-if", -] - 
-[[package]] -name = "itertools" -version = "0.10.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" -dependencies = [ - "either", -] - -[[package]] -name = "lazy_static" -version = "1.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" - -[[package]] -name = "libc" -version = "0.2.138" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db6d7e329c562c5dfab7a46a2afabc8b987ab9a4834c9d1ca04dc54c1546cef8" - -[[package]] -name = "log" -version = "0.4.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e" -dependencies = [ - "cfg-if", -] - -[[package]] -name = "multimap" -version = "0.8.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a" - -[[package]] -name = "num-bigint" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f93ab6289c7b344a8a9f60f88d80aa20032336fe78da341afc91c8a2341fc75f" -dependencies = [ - "autocfg", - "num-integer", - "num-traits", -] - -[[package]] -name = "num-integer" -version = "0.1.45" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9" -dependencies = [ - "autocfg", - "num-traits", -] - -[[package]] -name = "num-traits" -version = "0.2.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd" -dependencies = [ - "autocfg", -] - -[[package]] -name = "once_cell" -version = "1.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86f0b0d4bf799edbc74508c1e8bf170ff5f41238e5f8225603ca7caaae2b7860" - -[[package]] -name = "pad" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2ad9b889f1b12e0b9ee24db044b5129150d5eada288edc800f789928dc8c0e3" -dependencies = [ - "unicode-width", -] - -[[package]] -name = "petgraph" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6d5014253a1331579ce62aa67443b4a658c5e7dd03d4bc6d302b94474888143" -dependencies = [ - "fixedbitset", - "indexmap", -] - -[[package]] -name = "prettyplease" -version = "0.1.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c142c0e46b57171fe0c528bee8c5b7569e80f0c17e377cd0e30ea57dbc11bb51" -dependencies = [ - "proc-macro2", - "syn", -] - -[[package]] -name = "proc-macro2" -version = "1.0.47" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ea3d908b0e36316caf9e9e2c4625cdde190a7e6f440d794667ed17a1855e725" -dependencies = [ - "unicode-ident", -] - -[[package]] -name = "prost" -version = "0.11.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0b18e655c21ff5ac2084a5ad0611e827b3f92badf79f4910b5a5c58f4d87ff0" -dependencies = [ - "bytes", - "prost-derive", -] - -[[package]] -name = "prost-build" -version = "0.11.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e330bf1316db56b12c2bcfa399e8edddd4821965ea25ddb2c134b610b1c1c604" -dependencies = [ - "bytes", - "heck", - "itertools", - "lazy_static", - "log", - "multimap", - "petgraph", - 
"prettyplease", - "prost", - "prost-types", - "regex", - "syn", - "tempfile", - "which", -] - -[[package]] -name = "prost-derive" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "164ae68b6587001ca506d3bf7f1000bfa248d0e1217b618108fba4ec1d0cc306" -dependencies = [ - "anyhow", - "itertools", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "prost-types" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "747761bc3dc48f9a34553bf65605cf6cb6288ba219f3450b4275dbd81539551a" -dependencies = [ - "bytes", - "prost", -] - -[[package]] -name = "quote" -version = "1.0.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbe448f377a7d6961e30f5955f9b8d106c3f5e449d493ee1b125c1d43c2b5179" -dependencies = [ - "proc-macro2", -] - -[[package]] -name = "redox_syscall" -version = "0.2.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" -dependencies = [ - "bitflags", -] - -[[package]] -name = "regex" -version = "1.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e076559ef8e241f2ae3479e36f97bd5741c0330689e217ad51ce2c76808b868a" -dependencies = [ - "regex-syntax", -] - -[[package]] -name = "regex-syntax" -version = "0.6.28" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "456c603be3e8d448b072f410900c09faf164fbce2d480456f50eea6e25f9c848" - -[[package]] -name = "remove_dir_all" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" -dependencies = [ - "winapi", -] - -[[package]] -name = "substreams" -version = "0.5.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ea94f238b54b075ad17894537bdcc20d5fc65cdc199bf1594c9ecfdc6454840" -dependencies = [ - "anyhow", - "bigdecimal", - "hex", - "hex-literal", - "num-bigint", - "num-traits", - "pad", - "prost", - "prost-build", - "prost-types", - "substreams-macro", - "thiserror", -] - -[[package]] -name = "substreams-entity-change" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d423d0c12a9284a3d6d4ec288dbc9bfec3d55f9056098ba91a6dcfa64fb3889e" -dependencies = [ - "base64", - "prost", - "prost-types", - "substreams", -] - -[[package]] -name = "substreams-filter" -version = "0.0.1" -dependencies = [ - "hex", - "prost", - "substreams", - "substreams-entity-change", - "substreams-near-core", - "tonic-build", -] - -[[package]] -name = "substreams-macro" -version = "0.5.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c9df3ebfeefa8958b1de17f7e9e80f9b1d9a78cbe9114716a872a52b60b8343" -dependencies = [ - "proc-macro2", - "quote", - "syn", - "thiserror", -] - -[[package]] -name = "substreams-near-core" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9922f437e6cb86b62cfd8bdede93937def710616ac2825ffff06b8770bbd06df" -dependencies = [ - "bs58", - "prost", - "prost-build", - "prost-types", -] - -[[package]] -name = "syn" -version = "1.0.105" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60b9b43d45702de4c839cb9b51d9f529c5dd26a4aff255b42b1ebc03e88ee908" -dependencies = [ - "proc-macro2", - "quote", - "unicode-ident", -] - -[[package]] -name = "tempfile" -version = "3.3.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cdb1ef4eaeeaddc8fbd371e5017057064af0911902ef36b39801f67cc6d79e4" -dependencies = [ - "cfg-if", - "fastrand", - "libc", - "redox_syscall", - "remove_dir_all", - "winapi", -] - -[[package]] -name = "thiserror" -version = "1.0.37" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10deb33631e3c9018b9baf9dcbbc4f737320d2b576bac10f6aefa048fa407e3e" -dependencies = [ - "thiserror-impl", -] - -[[package]] -name = "thiserror-impl" -version = "1.0.37" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "982d17546b47146b28f7c22e3d08465f6b8903d0ea13c1660d9d84a6e7adcdbb" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "tonic-build" -version = "0.8.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5bf5e9b9c0f7e0a7c027dcfaba7b2c60816c7049171f679d99ee2ff65d0de8c4" -dependencies = [ - "prettyplease", - "proc-macro2", - "prost-build", - "quote", - "syn", -] - -[[package]] -name = "unicode-ident" -version = "1.0.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ceab39d59e4c9499d4e5a8ee0e2735b891bb7308ac83dfb4e80cad195c9f6f3" - -[[package]] -name = "unicode-width" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b" - -[[package]] -name = "which" -version = "4.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c831fbbee9e129a8cf93e7747a82da9d95ba8e16621cae60ec2cdc849bacb7b" -dependencies = [ - "either", - "libc", - "once_cell", -] - -[[package]] -name = "winapi" -version = "0.3.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" -dependencies = [ - "winapi-i686-pc-windows-gnu", - "winapi-x86_64-pc-windows-gnu", -] - -[[package]] -name = "winapi-i686-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" - -[[package]] -name = "winapi-x86_64-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" diff --git a/substreams/substreams-trigger-filter/Cargo.toml b/substreams/substreams-trigger-filter/Cargo.toml deleted file mode 100644 index f1880c3412b..00000000000 --- a/substreams/substreams-trigger-filter/Cargo.toml +++ /dev/null @@ -1,21 +0,0 @@ -[package] -name = "substreams-trigger-filter" -version.workspace = true -edition.workspace = true - -[lib] -doc = false -name = "substreams" -crate-type = ["cdylib"] - -[dependencies] -hex = { version = "0.4", default-features = false } -prost.workspace = true -substreams.workspace = true -substreams-entity-change.workspace = true -substreams-near-core.workspace = true - -trigger-filters.path = "../trigger-filters" - -[build-dependencies] -tonic-build.workspace = true diff --git a/substreams/substreams-trigger-filter/Makefile b/substreams/substreams-trigger-filter/Makefile deleted file mode 100755 index 365b6f05178..00000000000 --- a/substreams/substreams-trigger-filter/Makefile +++ /dev/null @@ -1,35 +0,0 @@ -ENDPOINT ?= mainnet.near.streamingfast.io:443 -START_BLOCK ?= 96764162 -STOP_BLOCK ?= +100 - -.PHONY: build -build: - cargo build --target wasm32-unknown-unknown 
--release - -.PHONY: run -run: build - substreams run -e $(ENDPOINT) substreams.yaml near_filter -s $(START_BLOCK) -t $(STOP_BLOCK) $(ARGS) - -.PHONY: gui -gui: build - substreams gui -e $(ENDPOINT) substreams.yaml map_block -s $(START_BLOCK) -t $(STOP_BLOCK) - -# .PHONY: protogen -# protogen: -# substreams protogen ./substreams.yaml --exclude-paths="sf/substreams,google" - -.PHONY: pack -pack: build - substreams pack substreams.yaml - -.PHONY: deploy_local -deploy_local: pack - mkdir build 2> /dev/null || true - bun x graph build --ipfs http://localhost:5001 subgraph.yaml - bun x graph create map_block --node http://127.0.0.1:8020 - bun x graph deploy --node http://127.0.0.1:8020 --ipfs http://127.0.0.1:5001 --version-label v0.0.1 map_block subgraph.yaml - -.PHONY: undeploy_local -undeploy_local: - graphman --config "$(GRAPH_CONFIG)" drop --force uniswap_v3 - diff --git a/substreams/substreams-trigger-filter/build.rs b/substreams/substreams-trigger-filter/build.rs deleted file mode 100644 index 22b972babc5..00000000000 --- a/substreams/substreams-trigger-filter/build.rs +++ /dev/null @@ -1,12 +0,0 @@ -fn main() { - println!("cargo:rerun-if-changed=proto"); - tonic_build::configure() - .protoc_arg("--experimental_allow_proto3_optional") - .extern_path( - ".sf.near.codec.v1", - "::substreams_near_core::pb::sf::near::type::v1", - ) - .out_dir("src/pb") - .compile_protos(&["proto/receipts.proto"], &["proto"]) - .expect("Failed to compile Substreams entity proto(s)"); -} diff --git a/substreams/substreams-trigger-filter/bun.lockb b/substreams/substreams-trigger-filter/bun.lockb deleted file mode 100755 index 7f816d7b37d..00000000000 Binary files a/substreams/substreams-trigger-filter/bun.lockb and /dev/null differ diff --git a/substreams/substreams-trigger-filter/package.json b/substreams/substreams-trigger-filter/package.json deleted file mode 100644 index 00b628b1e1b..00000000000 --- a/substreams/substreams-trigger-filter/package.json +++ /dev/null @@ -1 +0,0 @@ -{ "dependencies": { "@graphprotocol/graph-cli": "^0.92.0" } } \ No newline at end of file diff --git a/substreams/substreams-trigger-filter/proto/near.proto b/substreams/substreams-trigger-filter/proto/near.proto deleted file mode 100644 index 22a0267669a..00000000000 --- a/substreams/substreams-trigger-filter/proto/near.proto +++ /dev/null @@ -1,521 +0,0 @@ -syntax = "proto3"; - -package sf.near.codec.v1; - -option go_package = "github.com/streamingfast/sf-near/pb/sf/near/codec/v1;pbcodec"; - -message Block { - string author = 1; - BlockHeader header = 2; - repeated ChunkHeader chunk_headers = 3; - repeated IndexerShard shards = 4; - repeated StateChangeWithCause state_changes = 5; -} - -// HeaderOnlyBlock is a standard [Block] structure where all other fields are -// removed so that hydrating that object from a [Block] bytes payload will -// drastically reduce the allocated memory required to hold the full block. - -// -// This can be used to unpack a [Block] when only the [BlockHeader] information -// is required and greatly reduces required memory.
-message HeaderOnlyBlock { - BlockHeader header = 2; -} - -message StateChangeWithCause { - StateChangeValue value = 1; - StateChangeCause cause = 2; -} - -message StateChangeCause { - oneof cause { - NotWritableToDisk not_writable_to_disk = 1; - InitialState initial_state = 2; - TransactionProcessing transaction_processing = 3; - ActionReceiptProcessingStarted action_receipt_processing_started = 4; - ActionReceiptGasReward action_receipt_gas_reward = 5; - ReceiptProcessing receipt_processing = 6; - PostponedReceipt postponed_receipt = 7; - UpdatedDelayedReceipts updated_delayed_receipts = 8; - ValidatorAccountsUpdate validator_accounts_update = 9; - Migration migration = 10; - } - - message NotWritableToDisk {} - message InitialState {} - message TransactionProcessing {CryptoHash tx_hash = 1;} - message ActionReceiptProcessingStarted {CryptoHash receipt_hash = 1;} - message ActionReceiptGasReward {CryptoHash tx_hash = 1;} - message ReceiptProcessing {CryptoHash tx_hash = 1;} - message PostponedReceipt {CryptoHash tx_hash = 1;} - message UpdatedDelayedReceipts {} - message ValidatorAccountsUpdate {} - message Migration {} -} - -message StateChangeValue { - oneof value { - AccountUpdate account_update = 1; - AccountDeletion account_deletion = 2; - AccessKeyUpdate access_key_update = 3; - AccessKeyDeletion access_key_deletion = 4; - DataUpdate data_update = 5; - DataDeletion data_deletion = 6; - ContractCodeUpdate contract_code_update = 7; - ContractCodeDeletion contract_deletion = 8; - } - - message AccountUpdate {string account_id = 1; Account account = 2;} - message AccountDeletion {string account_id = 1;} - message AccessKeyUpdate { - string account_id = 1; - PublicKey public_key = 2; - AccessKey access_key = 3; - } - message AccessKeyDeletion { - string account_id = 1; - PublicKey public_key = 2; - } - message DataUpdate { - string account_id = 1; - bytes key = 2; - bytes value = 3; - } - message DataDeletion { - string account_id = 1; - bytes key = 2; - } - message ContractCodeUpdate { - string account_id = 1; - bytes code = 2; - } - message ContractCodeDeletion { - string account_id = 1; - } -} - -message Account { - BigInt amount = 1; - BigInt locked = 2; - CryptoHash code_hash = 3; - uint64 storage_usage = 4; -} - -message BlockHeader { - uint64 height = 1; - uint64 prev_height = 2; - CryptoHash epoch_id = 3; - CryptoHash next_epoch_id = 4; - CryptoHash hash = 5; - CryptoHash prev_hash = 6; - CryptoHash prev_state_root = 7; - CryptoHash chunk_receipts_root = 8; - CryptoHash chunk_headers_root = 9; - CryptoHash chunk_tx_root = 10; - CryptoHash outcome_root = 11; - uint64 chunks_included = 12; - CryptoHash challenges_root = 13; - uint64 timestamp = 14; - uint64 timestamp_nanosec = 15; - CryptoHash random_value = 16; - repeated ValidatorStake validator_proposals = 17; - repeated bool chunk_mask = 18; - BigInt gas_price = 19; - uint64 block_ordinal = 20; - BigInt total_supply = 21; - repeated SlashedValidator challenges_result = 22; - uint64 last_final_block_height = 23; - CryptoHash last_final_block = 24; - uint64 last_ds_final_block_height = 25; - CryptoHash last_ds_final_block = 26; - CryptoHash next_bp_hash = 27; - CryptoHash block_merkle_root = 28; - bytes epoch_sync_data_hash = 29; - repeated Signature approvals = 30; - Signature signature = 31; - uint32 latest_protocol_version = 32; -} - -message BigInt { - bytes bytes = 1; -} -message CryptoHash { - bytes bytes = 1; -} - -enum CurveKind { - ED25519 = 0; - SECP256K1 = 1; -} - -message Signature { - CurveKind type = 1; - bytes 
bytes = 2; -} - -message PublicKey { - CurveKind type = 1; - bytes bytes = 2; -} - -message ValidatorStake { - string account_id = 1; - PublicKey public_key = 2; - BigInt stake = 3; -} - -message SlashedValidator { - string account_id = 1; - bool is_double_sign = 2; -} - -message ChunkHeader { - bytes chunk_hash = 1; - bytes prev_block_hash = 2; - bytes outcome_root = 3; - bytes prev_state_root = 4; - bytes encoded_merkle_root = 5; - uint64 encoded_length = 6; - uint64 height_created = 7; - uint64 height_included = 8; - uint64 shard_id = 9; - uint64 gas_used = 10; - uint64 gas_limit = 11; - BigInt validator_reward = 12; - BigInt balance_burnt = 13; - bytes outgoing_receipts_root = 14; - bytes tx_root = 15; - repeated ValidatorStake validator_proposals = 16; - Signature signature = 17; -} - -message IndexerShard { - uint64 shard_id = 1; - IndexerChunk chunk = 2; - repeated IndexerExecutionOutcomeWithReceipt receipt_execution_outcomes = 3; -} - -message IndexerExecutionOutcomeWithReceipt { - ExecutionOutcomeWithId execution_outcome = 1; - Receipt receipt = 2; -} - -message IndexerChunk { - string author = 1; - ChunkHeader header = 2; - repeated IndexerTransactionWithOutcome transactions = 3; - repeated Receipt receipts = 4; -} - -message IndexerTransactionWithOutcome { - SignedTransaction transaction = 1; - IndexerExecutionOutcomeWithOptionalReceipt outcome = 2; -} - -message SignedTransaction { - string signer_id = 1; - PublicKey public_key = 2; - uint64 nonce = 3; - string receiver_id = 4; - repeated Action actions = 5; - Signature signature = 6; - CryptoHash hash = 7; -} - -message IndexerExecutionOutcomeWithOptionalReceipt { - ExecutionOutcomeWithId execution_outcome = 1; - Receipt receipt = 2; -} - -message Receipt { - string predecessor_id = 1; - string receiver_id = 2; - CryptoHash receipt_id = 3; - - oneof receipt { - ReceiptAction action = 10; - ReceiptData data = 11; - } -} - -message ReceiptData { - CryptoHash data_id = 1; - bytes data = 2; -} - -message ReceiptAction { - string signer_id = 1; - PublicKey signer_public_key = 2; - BigInt gas_price = 3; - repeated DataReceiver output_data_receivers = 4; - repeated CryptoHash input_data_ids = 5; - repeated Action actions = 6; -} - -message DataReceiver { - CryptoHash data_id = 1; - string receiver_id = 2; -} - -message ExecutionOutcomeWithId { - MerklePath proof = 1; - CryptoHash block_hash = 2; - CryptoHash id = 3; - ExecutionOutcome outcome = 4; -} - -message ExecutionOutcome { - repeated string logs = 1; - repeated CryptoHash receipt_ids = 2; - uint64 gas_burnt = 3; - BigInt tokens_burnt = 4; - string executor_id = 5; - oneof status { - UnknownExecutionStatus unknown = 20; - FailureExecutionStatus failure = 21; - SuccessValueExecutionStatus success_value = 22; - SuccessReceiptIdExecutionStatus success_receipt_id = 23; - } - ExecutionMetadata metadata = 6; -} - -enum ExecutionMetadata { - ExecutionMetadataV1 = 0; -} - -message SuccessValueExecutionStatus { - bytes value = 1; -} - -message SuccessReceiptIdExecutionStatus { - CryptoHash id = 1; -} - -message UnknownExecutionStatus {} -message FailureExecutionStatus { - oneof failure { - ActionError action_error = 1; - InvalidTxError invalid_tx_error = 2; - } -} - -message ActionError { - uint64 index = 1; - oneof kind { - AccountAlreadyExistsErrorKind account_already_exist = 21; - AccountDoesNotExistErrorKind account_does_not_exist = 22; - CreateAccountOnlyByRegistrarErrorKind create_account_only_by_registrar = 23; - CreateAccountNotAllowedErrorKind create_account_not_allowed = 24; - 
ActorNoPermissionErrorKind actor_no_permission =25; - DeleteKeyDoesNotExistErrorKind delete_key_does_not_exist = 26; - AddKeyAlreadyExistsErrorKind add_key_already_exists = 27; - DeleteAccountStakingErrorKind delete_account_staking = 28; - LackBalanceForStateErrorKind lack_balance_for_state = 29; - TriesToUnstakeErrorKind tries_to_unstake = 30; - TriesToStakeErrorKind tries_to_stake = 31; - InsufficientStakeErrorKind insufficient_stake = 32; - FunctionCallErrorKind function_call = 33; - NewReceiptValidationErrorKind new_receipt_validation = 34; - OnlyImplicitAccountCreationAllowedErrorKind only_implicit_account_creation_allowed = 35; - DeleteAccountWithLargeStateErrorKind delete_account_with_large_state = 36; - } -} - -message AccountAlreadyExistsErrorKind { - string account_id = 1; -} - -message AccountDoesNotExistErrorKind { - string account_id = 1; -} - -/// A top-level account ID can only be created by registrar. -message CreateAccountOnlyByRegistrarErrorKind{ - string account_id = 1; - string registrar_account_id = 2; - string predecessor_id = 3; -} - -message CreateAccountNotAllowedErrorKind{ - string account_id = 1; - string predecessor_id = 2; -} - -message ActorNoPermissionErrorKind{ - string account_id = 1; - string actor_id = 2; -} - -message DeleteKeyDoesNotExistErrorKind{ - string account_id = 1; - PublicKey public_key = 2; -} - -message AddKeyAlreadyExistsErrorKind{ - string account_id = 1; - PublicKey public_key = 2; -} - -message DeleteAccountStakingErrorKind{ - string account_id = 1; -} - -message LackBalanceForStateErrorKind{ - string account_id = 1; - BigInt balance = 2; -} - -message TriesToUnstakeErrorKind{ - string account_id = 1; -} - -message TriesToStakeErrorKind{ - string account_id = 1; - BigInt stake = 2; - BigInt locked = 3; - BigInt balance = 4; -} - -message InsufficientStakeErrorKind{ - string account_id = 1; - BigInt stake = 2; - BigInt minimum_stake = 3; -} - -message FunctionCallErrorKind { - FunctionCallErrorSer error = 1; -} - -enum FunctionCallErrorSer { //todo: add more detail? - CompilationError = 0; - LinkError = 1; - MethodResolveError = 2; - WasmTrap = 3; - WasmUnknownError = 4; - HostError = 5; - _EVMError = 6; - ExecutionError = 7; -} - -message NewReceiptValidationErrorKind { - ReceiptValidationError error = 1; -} - -enum ReceiptValidationError { //todo: add more detail? - InvalidPredecessorId = 0; - InvalidReceiverAccountId = 1; - InvalidSignerAccountId = 2; - InvalidDataReceiverId = 3; - ReturnedValueLengthExceeded = 4; - NumberInputDataDependenciesExceeded = 5; - ActionsValidationError = 6; -} - -message OnlyImplicitAccountCreationAllowedErrorKind{ - string account_id = 1; -} - -message DeleteAccountWithLargeStateErrorKind{ - string account_id = 1; -} - -enum InvalidTxError { //todo: add more detail? 
- InvalidAccessKeyError = 0; - InvalidSignerId = 1; - SignerDoesNotExist = 2; - InvalidNonce = 3; - NonceTooLarge = 4; - InvalidReceiverId = 5; - InvalidSignature = 6; - NotEnoughBalance = 7; - LackBalanceForState = 8; - CostOverflow = 9; - InvalidChain = 10; - Expired = 11; - ActionsValidation = 12; - TransactionSizeExceeded = 13; -} - -message MerklePath { - repeated MerklePathItem path = 1; -} - -message MerklePathItem { - CryptoHash hash = 1; - Direction direction = 2; -} - -enum Direction { - left = 0; - right = 1; -} - -message Action { - oneof action { - CreateAccountAction create_account = 1; - DeployContractAction deploy_contract = 2; - FunctionCallAction function_call = 3; - TransferAction transfer = 4; - StakeAction stake = 5; - AddKeyAction add_key = 6; - DeleteKeyAction delete_key = 7; - DeleteAccountAction delete_account = 8; - } -} - -message CreateAccountAction { -} - -message DeployContractAction { - bytes code = 1; -} - -message FunctionCallAction { - string method_name = 1; - bytes args = 2; - uint64 gas = 3; - BigInt deposit = 4; -} - -message TransferAction { - BigInt deposit = 1; -} - -message StakeAction { - BigInt stake = 1; - PublicKey public_key = 2; -} - -message AddKeyAction { - PublicKey public_key = 1; - AccessKey access_key = 2; -} - -message DeleteKeyAction { - PublicKey public_key = 1; -} - -message DeleteAccountAction { - string beneficiary_id = 1; -} - -message AccessKey { - uint64 nonce = 1; - AccessKeyPermission permission = 2; -} - -message AccessKeyPermission { - oneof permission { - FunctionCallPermission function_call = 1; - FullAccessPermission full_access = 2; - } -} - -message FunctionCallPermission { - BigInt allowance = 1; - string receiver_id = 2; - repeated string method_names = 3; -} - -message FullAccessPermission { -} diff --git a/substreams/substreams-trigger-filter/proto/receipts.proto b/substreams/substreams-trigger-filter/proto/receipts.proto deleted file mode 100755 index d7e4a822573..00000000000 --- a/substreams/substreams-trigger-filter/proto/receipts.proto +++ /dev/null @@ -1,15 +0,0 @@ -syntax = "proto3"; - -import "near.proto"; - -package receipts.v1; - -message BlockAndReceipts { - sf.near.codec.v1.Block block = 1; - repeated sf.near.codec.v1.ExecutionOutcomeWithId outcome = 2; - repeated sf.near.codec.v1.Receipt receipt = 3; -} - - - - diff --git a/substreams/substreams-trigger-filter/rust-toolchain.toml b/substreams/substreams-trigger-filter/rust-toolchain.toml deleted file mode 100755 index fde0e8fe57c..00000000000 --- a/substreams/substreams-trigger-filter/rust-toolchain.toml +++ /dev/null @@ -1,2 +0,0 @@ -[toolchain] -targets = [ "wasm32-unknown-unknown" ] diff --git a/substreams/substreams-trigger-filter/schema.graphql b/substreams/substreams-trigger-filter/schema.graphql deleted file mode 100644 index 20e5f730423..00000000000 --- a/substreams/substreams-trigger-filter/schema.graphql +++ /dev/null @@ -1,4 +0,0 @@ -type Block @entity { - id: Bytes! 
-} - diff --git a/substreams/substreams-trigger-filter/src/lib.rs b/substreams/substreams-trigger-filter/src/lib.rs deleted file mode 100755 index 01109234fdd..00000000000 --- a/substreams/substreams-trigger-filter/src/lib.rs +++ /dev/null @@ -1,99 +0,0 @@ -#![allow(clippy::not_unsafe_ptr_arg_deref)] - -mod pb; - -use pb::receipts::v1::BlockAndReceipts; -use substreams_entity_change::pb::entity::EntityChanges; -use substreams_near_core::pb::sf::near::r#type::v1::{ - execution_outcome, receipt::Receipt, Block, IndexerExecutionOutcomeWithReceipt, -}; -use trigger_filters::NearFilter; - -fn status(outcome: &IndexerExecutionOutcomeWithReceipt) -> Option<&execution_outcome::Status> { - outcome - .execution_outcome - .as_ref() - .and_then(|o| o.outcome.as_ref()) - .and_then(|o| o.status.as_ref()) -} - -fn is_success(outcome: &IndexerExecutionOutcomeWithReceipt) -> bool { - status(outcome) - .map(|s| { - use execution_outcome::Status::*; - - match s { - Unknown(_) | Failure(_) => false, - SuccessValue(_) | SuccessReceiptId(_) => true, - } - }) - .unwrap_or(false) -} - -#[substreams::handlers::map] -fn near_filter(params: String, blk: Block) -> Result { - let mut blk = blk; - let filter = NearFilter::try_from(params.as_str())?; - let mut out = BlockAndReceipts::default(); - - blk.shards = blk - .shards - .into_iter() - .map(|shard| { - let mut shard = shard; - let receipt_execution_outcomes = shard - .receipt_execution_outcomes - .into_iter() - .filter(|outcome| { - if !is_success(&outcome) { - return false; - } - - let execution_outcome = match outcome.execution_outcome.as_ref() { - Some(eo) => eo, - None => return false, - }; - - let receipt = match outcome.receipt.as_ref() { - Some(receipt) => receipt, - None => return false, - }; - - if !matches!(receipt.receipt, Some(Receipt::Action(_))) { - return false; - } - - if !filter.matches(&receipt.receiver_id) { - return false; - } - - out.outcome.push(execution_outcome.clone()); - out.receipt.push(receipt.clone()); - true - }) - .collect(); - shard.receipt_execution_outcomes = receipt_execution_outcomes; - shard - }) - .collect(); - - out.block = Some(blk.clone()); - - Ok(out) -} - -#[substreams::handlers::map] -fn graph_out(blk: Block) -> Result { - let mut out = EntityChanges::default(); - - let hex = hex::encode(&blk.header.as_ref().unwrap().hash.as_ref().unwrap().bytes); - - out.push_change( - "Block", - &hex, - blk.header.unwrap().height, - substreams_entity_change::pb::entity::entity_change::Operation::Create, - ); - - Ok(out) -} diff --git a/substreams/substreams-trigger-filter/src/pb/mod.rs b/substreams/substreams-trigger-filter/src/pb/mod.rs deleted file mode 100755 index be6467ea7fd..00000000000 --- a/substreams/substreams-trigger-filter/src/pb/mod.rs +++ /dev/null @@ -1,8 +0,0 @@ -// @generated -pub mod receipts { - // @@protoc_insertion_point(attribute:receipts.v1) - pub mod v1 { - include!("receipts.v1.rs"); - // @@protoc_insertion_point(receipts.v1) - } -} diff --git a/substreams/substreams-trigger-filter/src/pb/receipts.v1.rs b/substreams/substreams-trigger-filter/src/pb/receipts.v1.rs deleted file mode 100644 index 76b6d1fe456..00000000000 --- a/substreams/substreams-trigger-filter/src/pb/receipts.v1.rs +++ /dev/null @@ -1,16 +0,0 @@ -// This file is @generated by prost-build. 
-#[derive(Clone, PartialEq, ::prost::Message)] -pub struct BlockAndReceipts { - #[prost(message, optional, tag = "1")] - pub block: ::core::option::Option< - ::substreams_near_core::pb::sf::near::r#type::v1::Block, - >, - #[prost(message, repeated, tag = "2")] - pub outcome: ::prost::alloc::vec::Vec< - ::substreams_near_core::pb::sf::near::r#type::v1::ExecutionOutcomeWithId, - >, - #[prost(message, repeated, tag = "3")] - pub receipt: ::prost::alloc::vec::Vec< - ::substreams_near_core::pb::sf::near::r#type::v1::Receipt, - >, -} diff --git a/substreams/substreams-trigger-filter/subgraph.yaml b/substreams/substreams-trigger-filter/subgraph.yaml deleted file mode 100644 index 88bf9ebcd1e..00000000000 --- a/substreams/substreams-trigger-filter/subgraph.yaml +++ /dev/null @@ -1,16 +0,0 @@ -specVersion: 0.0.5 -description: NEAR Blocks Indexing -repository: git@github.com:streamingfast/graph-node-dev.git -schema: - file: ./schema.graphql -dataSources: - - kind: substreams - name: hello-world - network: near-mainnet - source: - package: - moduleName: graph_out - file: substreams-near-hello-world-v0.1.0.spkg - mapping: - kind: substreams/graph-entities - apiVersion: 0.0.7 diff --git a/substreams/substreams-trigger-filter/substreams-trigger-filter-v0.1.0.spkg b/substreams/substreams-trigger-filter/substreams-trigger-filter-v0.1.0.spkg deleted file mode 100644 index f1e733c6675..00000000000 Binary files a/substreams/substreams-trigger-filter/substreams-trigger-filter-v0.1.0.spkg and /dev/null differ diff --git a/substreams/substreams-trigger-filter/substreams.yaml b/substreams/substreams-trigger-filter/substreams.yaml deleted file mode 100755 index 6352bedce41..00000000000 --- a/substreams/substreams-trigger-filter/substreams.yaml +++ /dev/null @@ -1,37 +0,0 @@ -specVersion: v0.1.0 -package: - name: substreams_trigger_filter - version: v0.1.0 - -imports: - near: https://github.com/streamingfast/firehose-near/releases/download/v1.1.0/substreams-near-v1.1.0.spkg - -protobuf: - files: - - receipts.proto - - near.proto - importPaths: - - ./proto - -binaries: - default: - type: wasm/rust-v1 - file: ../../target/wasm32-unknown-unknown/release/substreams.wasm - -modules: - - name: near_filter - kind: map - initialBlock: 9820214 - inputs: - - params: string - - source: sf.near.type.v1.Block - output: - type: proto:receipts.v1.Receipts - - name: graph_out - kind: map - initialBlock: 9820214 - inputs: - - source: sf.near.type.v1.Block - output: - type: proto:sf.substreams.entity.v1.EntityChanges - diff --git a/substreams/trigger-filters/Cargo.toml b/substreams/trigger-filters/Cargo.toml deleted file mode 100644 index b1f2db07772..00000000000 --- a/substreams/trigger-filters/Cargo.toml +++ /dev/null @@ -1,7 +0,0 @@ -[package] -name = "trigger-filters" -version.workspace = true -edition.workspace = true - -[dependencies] -anyhow = "1" diff --git a/substreams/trigger-filters/src/lib.rs b/substreams/trigger-filters/src/lib.rs deleted file mode 100644 index 81bb423f7f5..00000000000 --- a/substreams/trigger-filters/src/lib.rs +++ /dev/null @@ -1,80 +0,0 @@ -use anyhow::anyhow; -use std::collections::HashSet; - -#[derive(Debug, Default, PartialEq)] -pub struct NearFilter<'a> { - pub accounts: HashSet<&'a str>, - pub partial_accounts: HashSet<(Option<&'a str>, Option<&'a str>)>, -} - -impl<'a> NearFilter<'a> { - pub fn matches(&self, account: &str) -> bool { - let partial_match = self.partial_accounts.iter().any(|partial| match partial { - (Some(prefix), Some(suffix)) => { - account.starts_with(prefix) && 
account.ends_with(suffix) - } - (Some(prefix), None) => account.starts_with(prefix), - (None, Some(suffix)) => account.ends_with(suffix), - (None, None) => unreachable!(), - }); - - if !self.accounts.contains(&account) && !partial_match { - return false; - } - - true - } -} - -impl<'a> TryFrom<&'a str> for NearFilter<'a> { - type Error = anyhow::Error; - - fn try_from(params: &'a str) -> Result<Self, Self::Error> { - let mut accounts: HashSet<&str> = HashSet::default(); - let mut partial_accounts: HashSet<(Option<&str>, Option<&str>)> = HashSet::default(); - let mut lines = params.lines(); - let mut header = lines - .next() - .ok_or(anyhow!("header line not present"))? - .split(","); - let accs_len: usize = header - .next() - .ok_or(anyhow!("header didn't have the expected format"))? - .parse() - .map_err(|_| anyhow!("accounts len is supposed to be a usize"))?; - let partials_len: usize = header - .next() - .ok_or(anyhow!("header didn't contain partials len"))? - .parse() - .map_err(|_| anyhow!("partials len is supposed to be a usize"))?; - - let accs_line = lines.next(); - if accs_len != 0 { - accounts.extend( - accs_line - .ok_or(anyhow!("full matches line not found"))? - .split(","), - ); - } - - if partials_len != 0 { - partial_accounts.extend(lines.take(partials_len).map(|line| { - let mut parts = line.split(","); - let start = match parts.next() { - Some(x) if x.is_empty() => None, - x => x, - }; - let end = match parts.next() { - Some(x) if x.is_empty() => None, - x => x, - }; - (start, end) - })); - } - - Ok(NearFilter { - accounts, - partial_accounts, - }) - } -} diff --git a/tests/Cargo.toml b/tests/Cargo.toml index e61cab6b660..737125a5533 100644 --- a/tests/Cargo.toml +++ b/tests/Cargo.toml @@ -10,7 +10,6 @@ async-stream = "0.3.6" async-trait = { workspace = true } graph = { path = "../graph" } graph-chain-ethereum = { path = "../chain/ethereum" } -graph-chain-substreams= {path = "../chain/substreams"} graph-node = { path = "../node" } graph-core = { path = "../core" } graph-graphql = { path = "../graphql" } diff --git a/tests/runner-tests/substreams/.gitignore b/tests/runner-tests/substreams/.gitignore deleted file mode 100644 index 37e5bb836a4..00000000000 --- a/tests/runner-tests/substreams/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -target/ -.idea -src/pb/ -node_modules/ \ No newline at end of file diff --git a/tests/runner-tests/substreams/Cargo.lock b/tests/runner-tests/substreams/Cargo.lock deleted file mode 100644 index e8575b5b430..00000000000 --- a/tests/runner-tests/substreams/Cargo.lock +++ /dev/null @@ -1,980 +0,0 @@ -# This file is automatically @generated by Cargo. -# It is not intended for manual editing.
-version = 3 - -[[package]] -name = "aho-corasick" -version = "0.7.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc936419f96fa211c1b9166887b38e5e40b19958e5b895be7c1f93adec7071ac" -dependencies = [ - "memchr", -] - -[[package]] -name = "anyhow" -version = "1.0.68" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2cb2f989d18dd141ab8ae82f64d1a8cdd37e0840f73a406896cf5e99502fab61" - -[[package]] -name = "arrayvec" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6" - -[[package]] -name = "autocfg" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" - -[[package]] -name = "base64" -version = "0.13.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" - -[[package]] -name = "bigdecimal" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6aaf33151a6429fe9211d1b276eafdf70cdff28b071e76c0b0e1503221ea3744" -dependencies = [ - "num-bigint", - "num-integer", - "num-traits", -] - -[[package]] -name = "bitflags" -version = "1.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" - -[[package]] -name = "bitvec" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c" -dependencies = [ - "funty", - "radium", - "tap", - "wyz", -] - -[[package]] -name = "block-buffer" -version = "0.10.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69cce20737498f97b993470a6e536b8523f0af7892a4f928cceb1ac5e52ebe7e" -dependencies = [ - "generic-array", -] - -[[package]] -name = "byte-slice-cast" -version = "1.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3ac9f8b63eca6fd385229b3675f6cc0dc5c8a5c8a54a59d4f52ffd670d87b0c" - -[[package]] -name = "byteorder" -version = "1.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" - -[[package]] -name = "bytes" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfb24e866b15a1af2a1b663f10c6b6b8f397a84aadb828f12e5b289ec23a3a3c" - -[[package]] -name = "cfg-if" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" - -[[package]] -name = "cpufeatures" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28d997bd5e24a5928dd43e46dc529867e207907fe0b239c3477d924f7f2ca320" -dependencies = [ - "libc", -] - -[[package]] -name = "crunchy" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" - -[[package]] -name = "crypto-common" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" -dependencies = [ - "generic-array", - "typenum", -] - -[[package]] -name = "digest" -version = "0.10.6" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "8168378f4e5023e7218c89c891c0fd8ecdb5e5e4f18cb78f38cf245dd021e76f" -dependencies = [ - "block-buffer", - "crypto-common", -] - -[[package]] -name = "either" -version = "1.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90e5c1c8368803113bf0c9584fc495a58b86dc8a29edbf8fe877d21d9507e797" - -[[package]] -name = "ethabi" -version = "17.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4966fba78396ff92db3b817ee71143eccd98acf0f876b8d600e585a670c5d1b" -dependencies = [ - "ethereum-types", - "hex", - "once_cell", - "regex", - "serde", - "serde_json", - "sha3", - "thiserror", - "uint", -] - -[[package]] -name = "ethbloom" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11da94e443c60508eb62cf256243a64da87304c2802ac2528847f79d750007ef" -dependencies = [ - "crunchy", - "fixed-hash", - "impl-rlp", - "impl-serde", - "tiny-keccak", -] - -[[package]] -name = "ethereum-types" -version = "0.13.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2827b94c556145446fcce834ca86b7abf0c39a805883fe20e72c5bfdb5a0dc6" -dependencies = [ - "ethbloom", - "fixed-hash", - "impl-rlp", - "impl-serde", - "primitive-types", - "uint", -] - -[[package]] -name = "fastrand" -version = "1.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7a407cfaa3385c4ae6b23e84623d48c2798d06e3e6a1878f7f59f17b3f86499" -dependencies = [ - "instant", -] - -[[package]] -name = "fixed-hash" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfcf0ed7fe52a17a03854ec54a9f76d6d84508d1c0e66bc1793301c73fc8493c" -dependencies = [ - "byteorder", - "rand", - "rustc-hex", - "static_assertions", -] - -[[package]] -name = "fixedbitset" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" - -[[package]] -name = "funty" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" - -[[package]] -name = "generic-array" -version = "0.14.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bff49e947297f3312447abdca79f45f4738097cc82b06e72054d2223f601f1b9" -dependencies = [ - "typenum", - "version_check", -] - -[[package]] -name = "getrandom" -version = "0.2.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c05aeb6a22b8f62540c194aac980f2115af067bfe15a0734d7277a768d396b31" -dependencies = [ - "cfg-if", - "libc", - "wasi", -] - -[[package]] -name = "hashbrown" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" - -[[package]] -name = "heck" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2540771e65fc8cb83cd6e8a237f70c319bd5c29f78ed1084ba5d50eeac86f7f9" - -[[package]] -name = "hex" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" - -[[package]] -name = "hex-literal" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ebdb29d2ea9ed0083cd8cece49bbd968021bd99b0849edb4a9a7ee0fdf6a4e0" - 
-[[package]]
-name = "impl-codec"
-version = "0.6.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ba6a270039626615617f3f36d15fc827041df3b78c439da2cadfa47455a77f2f"
-dependencies = [
- "parity-scale-codec",
-]
-
-[[package]]
-name = "impl-rlp"
-version = "0.3.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f28220f89297a075ddc7245cd538076ee98b01f2a9c23a53a4f1105d5a322808"
-dependencies = [
- "rlp",
-]
-
-[[package]]
-name = "impl-serde"
-version = "0.3.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4551f042f3438e64dbd6226b20527fc84a6e1fe65688b58746a2f53623f25f5c"
-dependencies = [
- "serde",
-]
-
-[[package]]
-name = "impl-trait-for-tuples"
-version = "0.2.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb"
-dependencies = [
- "proc-macro2",
- "quote",
- "syn",
-]
-
-[[package]]
-name = "indexmap"
-version = "1.9.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1885e79c1fc4b10f0e172c475f458b7f7b93061064d98c3293e98c5ba0c8b399"
-dependencies = [
- "autocfg",
- "hashbrown",
-]
-
-[[package]]
-name = "instant"
-version = "0.1.12"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c"
-dependencies = [
- "cfg-if",
-]
-
-[[package]]
-name = "itertools"
-version = "0.10.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473"
-dependencies = [
- "either",
-]
-
-[[package]]
-name = "itoa"
-version = "1.0.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fad582f4b9e86b6caa621cabeb0963332d92eea04729ab12892c2533951e6440"
-
-[[package]]
-name = "keccak"
-version = "0.1.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3afef3b6eff9ce9d8ff9b3601125eec7f0c8cbac7abd14f355d053fa56c98768"
-dependencies = [
- "cpufeatures",
-]
-
-[[package]]
-name = "lazy_static"
-version = "1.4.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
-
-[[package]]
-name = "libc"
-version = "0.2.139"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "201de327520df007757c1f0adce6e827fe8562fbc28bfd9c15571c66ca1f5f79"
-
-[[package]]
-name = "log"
-version = "0.4.17"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e"
-dependencies = [
- "cfg-if",
-]
-
-[[package]]
-name = "memchr"
-version = "2.5.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d"
-
-[[package]]
-name = "multimap"
-version = "0.8.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a"
-
-[[package]]
-name = "num-bigint"
-version = "0.4.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f93ab6289c7b344a8a9f60f88d80aa20032336fe78da341afc91c8a2341fc75f"
-dependencies = [
- "autocfg",
- "num-integer",
- "num-traits",
-]
-
-[[package]]
-name = "num-integer"
-version = "0.1.45"
-source = "registry+https://github.com/rust-lang/crates.io-index"
"225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9" -dependencies = [ - "autocfg", - "num-traits", -] - -[[package]] -name = "num-traits" -version = "0.2.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd" -dependencies = [ - "autocfg", -] - -[[package]] -name = "once_cell" -version = "1.17.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f61fba1741ea2b3d6a1e3178721804bb716a68a6aeba1149b5d52e3d464ea66" - -[[package]] -name = "pad" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2ad9b889f1b12e0b9ee24db044b5129150d5eada288edc800f789928dc8c0e3" -dependencies = [ - "unicode-width", -] - -[[package]] -name = "parity-scale-codec" -version = "3.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7ab01d0f889e957861bc65888d5ccbe82c158d0270136ba46820d43837cdf72" -dependencies = [ - "arrayvec", - "bitvec", - "byte-slice-cast", - "impl-trait-for-tuples", - "parity-scale-codec-derive", - "serde", -] - -[[package]] -name = "parity-scale-codec-derive" -version = "3.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86b26a931f824dd4eca30b3e43bb4f31cd5f0d3a403c5f5ff27106b805bfde7b" -dependencies = [ - "proc-macro-crate", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "petgraph" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6d5014253a1331579ce62aa67443b4a658c5e7dd03d4bc6d302b94474888143" -dependencies = [ - "fixedbitset", - "indexmap", -] - -[[package]] -name = "ppv-lite86" -version = "0.2.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" - -[[package]] -name = "prettyplease" -version = "0.1.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e97e3215779627f01ee256d2fad52f3d95e8e1c11e9fc6fd08f7cd455d5d5c78" -dependencies = [ - "proc-macro2", - "syn", -] - -[[package]] -name = "primitive-types" -version = "0.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e28720988bff275df1f51b171e1b2a18c30d194c4d2b61defdacecd625a5d94a" -dependencies = [ - "fixed-hash", - "impl-codec", - "impl-rlp", - "impl-serde", - "uint", -] - -[[package]] -name = "proc-macro-crate" -version = "1.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eda0fc3b0fb7c975631757e14d9049da17374063edb6ebbcbc54d880d4fe94e9" -dependencies = [ - "once_cell", - "thiserror", - "toml", -] - -[[package]] -name = "proc-macro2" -version = "1.0.50" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ef7d57beacfaf2d8aee5937dab7b7f28de3cb8b1828479bb5de2a7106f2bae2" -dependencies = [ - "unicode-ident", -] - -[[package]] -name = "prost" -version = "0.11.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b82eaa1d779e9a4bc1c3217db8ffbeabaae1dca241bf70183242128d48681cd" -dependencies = [ - "bytes", - "prost-derive", -] - -[[package]] -name = "prost-build" -version = "0.11.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3f8ad728fb08fe212df3c05169e940fbb6d9d16a877ddde14644a983ba2012e" -dependencies = [ - "bytes", - "heck", - "itertools", - "lazy_static", - "log", - "multimap", - "petgraph", - "prettyplease", - "prost", - "prost-types", - "regex", - 
"syn", - "tempfile", - "which", -] - -[[package]] -name = "prost-derive" -version = "0.11.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5d2d8d10f3c6ded6da8b05b5fb3b8a5082514344d56c9f871412d29b4e075b4" -dependencies = [ - "anyhow", - "itertools", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "prost-types" -version = "0.11.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "213622a1460818959ac1181aaeb2dc9c7f63df720db7d788b3e24eacd1983e13" -dependencies = [ - "prost", -] - -[[package]] -name = "quote" -version = "1.0.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8856d8364d252a14d474036ea1358d63c9e6965c8e5c1885c18f73d70bff9c7b" -dependencies = [ - "proc-macro2", -] - -[[package]] -name = "radium" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" - -[[package]] -name = "rand" -version = "0.8.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" -dependencies = [ - "libc", - "rand_chacha", - "rand_core", -] - -[[package]] -name = "rand_chacha" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" -dependencies = [ - "ppv-lite86", - "rand_core", -] - -[[package]] -name = "rand_core" -version = "0.6.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" -dependencies = [ - "getrandom", -] - -[[package]] -name = "redox_syscall" -version = "0.2.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" -dependencies = [ - "bitflags", -] - -[[package]] -name = "regex" -version = "1.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48aaa5748ba571fb95cd2c85c09f629215d3a6ece942baa100950af03a34f733" -dependencies = [ - "aho-corasick", - "memchr", - "regex-syntax", -] - -[[package]] -name = "regex-syntax" -version = "0.6.28" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "456c603be3e8d448b072f410900c09faf164fbce2d480456f50eea6e25f9c848" - -[[package]] -name = "remove_dir_all" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" -dependencies = [ - "winapi", -] - -[[package]] -name = "rlp" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb919243f34364b6bd2fc10ef797edbfa75f33c252e7998527479c6d6b47e1ec" -dependencies = [ - "bytes", - "rustc-hex", -] - -[[package]] -name = "rustc-hex" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" - -[[package]] -name = "ryu" -version = "1.0.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b4b9743ed687d4b4bcedf9ff5eaa7398495ae14e61cba0a295704edbc7decde" - -[[package]] -name = "serde" -version = "1.0.152" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb7d1f0d3021d347a83e556fc4683dea2ea09d87bccdf88ff5c12545d89d5efb" -dependencies = [ - "serde_derive", -] - 
-[[package]]
-name = "serde_derive"
-version = "1.0.152"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "af487d118eecd09402d70a5d72551860e788df87b464af30e5ea6a38c75c541e"
-dependencies = [
- "proc-macro2",
- "quote",
- "syn",
-]
-
-[[package]]
-name = "serde_json"
-version = "1.0.91"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "877c235533714907a8c2464236f5c4b2a17262ef1bd71f38f35ea592c8da6883"
-dependencies = [
- "itoa",
- "ryu",
- "serde",
-]
-
-[[package]]
-name = "sha3"
-version = "0.10.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bdf0c33fae925bdc080598b84bc15c55e7b9a4a43b3c704da051f977469691c9"
-dependencies = [
- "digest",
- "keccak",
-]
-
-[[package]]
-name = "static_assertions"
-version = "1.1.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f"
-
-[[package]]
-name = "substreams"
-version = "0.5.10"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "af972e374502cdfc9998132f5343848d1c58f27a295dc061a89804371f408a46"
-dependencies = [
- "anyhow",
- "bigdecimal",
- "hex",
- "hex-literal",
- "num-bigint",
- "num-traits",
- "pad",
- "prost",
- "prost-build",
- "prost-types",
- "substreams-macro",
- "thiserror",
-]
-
-[[package]]
-name = "substreams-entity-change"
-version = "1.3.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d423d0c12a9284a3d6d4ec288dbc9bfec3d55f9056098ba91a6dcfa64fb3889e"
-dependencies = [
- "base64",
- "prost",
- "prost-types",
- "substreams",
-]
-
-[[package]]
-name = "substreams-ethereum"
-version = "0.9.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "78effc18ed321399fe15ec082806e96a58d213f79741d078c1cd26dd6dd53025"
-dependencies = [
- "getrandom",
- "num-bigint",
- "substreams",
- "substreams-ethereum-abigen",
- "substreams-ethereum-core",
- "substreams-ethereum-derive",
-]
-
-[[package]]
-name = "substreams-ethereum-abigen"
-version = "0.9.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "97a176f39a6e09553c17a287edacd1854e5686fd20ffea3c9655dfc44d94b35e"
-dependencies = [
- "anyhow",
- "ethabi",
- "heck",
- "hex",
- "prettyplease",
- "proc-macro2",
- "quote",
- "substreams-ethereum-core",
- "syn",
-]
-
-[[package]]
-name = "substreams-ethereum-core"
-version = "0.9.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "db4700cfe408b75634a3c6b3a0caf7bddba4879601d2085c811485ea54cbde2d"
-dependencies = [
- "bigdecimal",
- "ethabi",
- "getrandom",
- "num-bigint",
- "prost",
- "prost-build",
- "prost-types",
- "substreams",
-]
-
-[[package]]
-name = "substreams-ethereum-derive"
-version = "0.9.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "40d6d278d926fe3f0775d996ee2b5e1dc822c1b4bf4f7bf07c7fbb5bce6c79a9"
-dependencies = [
- "ethabi",
- "heck",
- "hex",
- "num-bigint",
- "proc-macro2",
- "quote",
- "substreams-ethereum-abigen",
- "syn",
-]
-
-[[package]]
-name = "substreams-ethereum-quickstart"
-version = "1.0.0"
-dependencies = [
- "base64",
- "prost",
- "prost-types",
- "substreams",
- "substreams-entity-change",
- "substreams-ethereum",
-]
-
-[[package]]
-name = "substreams-macro"
-version = "0.5.10"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6521ccd011a4c3f52cd3c31fc7400733e4feba2094e0e0e6354adca25b2b3f37"
-dependencies = [
- "proc-macro2",
- "quote",
"syn", - "thiserror", -] - -[[package]] -name = "syn" -version = "1.0.107" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f4064b5b16e03ae50984a5a8ed5d4f8803e6bc1fd170a3cda91a1be4b18e3f5" -dependencies = [ - "proc-macro2", - "quote", - "unicode-ident", -] - -[[package]] -name = "tap" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" - -[[package]] -name = "tempfile" -version = "3.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cdb1ef4eaeeaddc8fbd371e5017057064af0911902ef36b39801f67cc6d79e4" -dependencies = [ - "cfg-if", - "fastrand", - "libc", - "redox_syscall", - "remove_dir_all", - "winapi", -] - -[[package]] -name = "thiserror" -version = "1.0.38" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a9cd18aa97d5c45c6603caea1da6628790b37f7a34b6ca89522331c5180fed0" -dependencies = [ - "thiserror-impl", -] - -[[package]] -name = "thiserror-impl" -version = "1.0.38" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fb327af4685e4d03fa8cbcf1716380da910eeb2bb8be417e7f9fd3fb164f36f" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "tiny-keccak" -version = "2.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237" -dependencies = [ - "crunchy", -] - -[[package]] -name = "toml" -version = "0.5.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4f7f0dd8d50a853a531c426359045b1998f04219d88799810762cd4ad314234" -dependencies = [ - "serde", -] - -[[package]] -name = "typenum" -version = "1.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba" - -[[package]] -name = "uint" -version = "0.9.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76f64bba2c53b04fcab63c01a7d7427eadc821e3bc48c34dc9ba29c501164b52" -dependencies = [ - "byteorder", - "crunchy", - "hex", - "static_assertions", -] - -[[package]] -name = "unicode-ident" -version = "1.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84a22b9f218b40614adcb3f4ff08b703773ad44fa9423e4e0d346d5db86e4ebc" - -[[package]] -name = "unicode-width" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b" - -[[package]] -name = "version_check" -version = "0.9.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" - -[[package]] -name = "wasi" -version = "0.11.0+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" - -[[package]] -name = "which" -version = "4.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2441c784c52b289a054b7201fc93253e288f094e2f4be9058343127c4226a269" -dependencies = [ - "either", - "libc", - "once_cell", -] - -[[package]] -name = "winapi" -version = "0.3.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" -dependencies = [ - "winapi-i686-pc-windows-gnu", - 
"winapi-x86_64-pc-windows-gnu", -] - -[[package]] -name = "winapi-i686-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" - -[[package]] -name = "winapi-x86_64-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" - -[[package]] -name = "wyz" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05f360fc0b24296329c78fda852a1e9ae82de9cf7b27dae4b7f62f118f77b9ed" -dependencies = [ - "tap", -] diff --git a/tests/runner-tests/substreams/Cargo.toml b/tests/runner-tests/substreams/Cargo.toml deleted file mode 100644 index 108db3089a8..00000000000 --- a/tests/runner-tests/substreams/Cargo.toml +++ /dev/null @@ -1,18 +0,0 @@ -[package] -name = "substreams-ethereum-quickstart" -version = "1.0.0" -edition = "2021" - -[lib] -name = "substreams" -crate-type = ["cdylib"] - -[dependencies] -substreams = "0.5" -substreams-ethereum = "0.9" -substreams-entity-change = "1.3" - -[profile.release] -lto = true -opt-level = 's' -strip = "debuginfo" diff --git a/tests/runner-tests/substreams/README.md b/tests/runner-tests/substreams/README.md deleted file mode 100644 index 50e893de7a1..00000000000 --- a/tests/runner-tests/substreams/README.md +++ /dev/null @@ -1,19 +0,0 @@ -# Substreams-powered subgraph: tracking contract creation - -A basic Substreams-powered subgraph, including the Substreams definition. This example detects new -contract deployments on Ethereum, tracking the creation block and timestamp. There is a -demonstration of the Graph Node integration, using `substreams_entity_change` types and helpers. - -## Prerequisites - -This -[requires the dependencies necessary for local Substreams development](https://substreams.streamingfast.io/developers-guide/installation-requirements). 
-
-## Quickstart
-
-```
-pnpm install # install graph-cli
-pnpm substreams:prepare # build and package the substreams module
-pnpm subgraph:build # build the subgraph
-pnpm subgraph:deploy # deploy the subgraph
-```
diff --git a/tests/runner-tests/substreams/package.json b/tests/runner-tests/substreams/package.json
deleted file mode 100644
index f7dba22f4bf..00000000000
--- a/tests/runner-tests/substreams/package.json
+++ /dev/null
@@ -1,20 +0,0 @@
-{
-  "name": "substreams",
-  "version": "0.0.0",
-  "private": true,
-  "scripts": {
-    "codegen": "graph codegen",
-    "deploy": "graph deploy",
-    "deploy:test": "graph deploy test/substreams --version-label v0.0.1 --ipfs $IPFS_URI --node $GRAPH_NODE_ADMIN_URI",
-    "subgraph:build": "graph build",
-    "substreams:build": "cargo build --target wasm32-unknown-unknown --release",
-    "substreams:clean": "rm -rf ./target && rm -rf ./src/pb",
-    "substreams:package": "substreams pack ./substreams.yaml",
-    "substreams:prepare": "pnpm substreams:protogen && pnpm substreams:build && pnpm substreams:package",
-    "substreams:protogen": "substreams protogen ./substreams.yaml --exclude-paths='sf/substreams,google'",
-    "substreams:stream": "substreams run -e mainnet.eth.streamingfast.io:443 substreams.yaml graph_out -s 12292922 -t +10"
-  },
-  "devDependencies": {
-    "@graphprotocol/graph-cli": "0.61.0"
-  }
-}
diff --git a/tests/runner-tests/substreams/proto/example.proto b/tests/runner-tests/substreams/proto/example.proto
deleted file mode 100644
index ac4d80b2452..00000000000
--- a/tests/runner-tests/substreams/proto/example.proto
+++ /dev/null
@@ -1,14 +0,0 @@
-syntax = "proto3";
-
-package example;
-
-message Contracts {
-  repeated Contract contracts = 1;
-}
-
-message Contract {
-  string address = 1;
-  uint64 blockNumber = 2;
-  string timestamp = 3;
-  uint64 ordinal = 4;
-}
\ No newline at end of file
diff --git a/tests/runner-tests/substreams/rust-toolchain.toml b/tests/runner-tests/substreams/rust-toolchain.toml
deleted file mode 100644
index e2c33ff1c31..00000000000
--- a/tests/runner-tests/substreams/rust-toolchain.toml
+++ /dev/null
@@ -1,3 +0,0 @@
-[toolchain]
-components = ["rustfmt"]
-targets = ["wasm32-unknown-unknown"]
diff --git a/tests/runner-tests/substreams/schema.graphql b/tests/runner-tests/substreams/schema.graphql
deleted file mode 100644
index 7b1c7d114ed..00000000000
--- a/tests/runner-tests/substreams/schema.graphql
+++ /dev/null
@@ -1,9 +0,0 @@
-type Contract @entity {
-  id: ID!
-
-  "The timestamp when the contract was deployed"
-  timestamp: String!
-
-  "The block number of the contract deployment"
-  blockNumber: BigInt!
-}
diff --git a/tests/runner-tests/substreams/src/lib.rs b/tests/runner-tests/substreams/src/lib.rs
deleted file mode 100644
index 0127d9aadd9..00000000000
--- a/tests/runner-tests/substreams/src/lib.rs
+++ /dev/null
@@ -1,40 +0,0 @@
-mod pb;
-
-use pb::example::{Contract, Contracts};
-
-use substreams::Hex;
-use substreams_entity_change::pb::entity::EntityChanges;
-use substreams_entity_change::tables::Tables;
-use substreams_ethereum::pb::eth;
-
-#[substreams::handlers::map]
-fn map_contract(block: eth::v2::Block) -> Result<Contracts, substreams::errors::Error> {
-    let contracts = block
-        .calls()
-        .filter(|view| !view.call.state_reverted)
-        .filter(|view| view.call.call_type == eth::v2::CallType::Create as i32)
-        .map(|view| Contract {
-            address: format!("0x{}", Hex(&view.call.address)),
-            block_number: block.number,
-            timestamp: block.timestamp_seconds().to_string(),
-            ordinal: view.call.begin_ordinal,
-        })
-        .collect();
-
-    Ok(Contracts { contracts })
-}
-
-#[substreams::handlers::map]
-pub fn graph_out(contracts: Contracts) -> Result<EntityChanges, substreams::errors::Error> {
-    // hash map of name to a table
-    let mut tables = Tables::new();
-
-    for contract in contracts.contracts.into_iter() {
-        tables
-            .create_row("Contract", contract.address)
-            .set("timestamp", contract.timestamp)
-            .set("blockNumber", contract.block_number);
-    }
-
-    Ok(tables.to_entity_changes())
-}
diff --git a/tests/runner-tests/substreams/subgraph.yaml b/tests/runner-tests/substreams/subgraph.yaml
deleted file mode 100644
index 377e326c568..00000000000
--- a/tests/runner-tests/substreams/subgraph.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
-specVersion: 0.0.4
-description: Ethereum Contract Tracking Subgraph (powered by Substreams)
-repository: https://github.com/graphprotocol/graph-tooling
-schema:
-  file: schema.graphql
-dataSources:
-  - kind: substreams
-    name: substream_test
-    network: test
-    source:
-      package:
-        moduleName: graph_out
-        file: substreams-test-v1.0.1.spkg
-    mapping:
-      kind: substreams/graph-entities
-      apiVersion: 0.0.6
diff --git a/tests/runner-tests/substreams/substreams-test-v1.0.1.spkg b/tests/runner-tests/substreams/substreams-test-v1.0.1.spkg
deleted file mode 100644
index 641e2786a4a..00000000000
Binary files a/tests/runner-tests/substreams/substreams-test-v1.0.1.spkg and /dev/null differ
diff --git a/tests/runner-tests/substreams/substreams.yaml b/tests/runner-tests/substreams/substreams.yaml
deleted file mode 100644
index 25704378334..00000000000
--- a/tests/runner-tests/substreams/substreams.yaml
+++ /dev/null
@@ -1,33 +0,0 @@
-specVersion: v0.1.0
-package:
-  name: 'substreams_test' # the name to be used in the .spkg
-  version: v1.0.1 # the version to use when creating the .spkg
-
-imports: # dependencies
-  entity: https://github.com/streamingfast/substreams-entity-change/releases/download/v0.2.1/substreams-entity-change-v0.2.1.spkg
-
-protobuf: # specifies custom types for use by Substreams modules
-  files:
-    - example.proto
-  importPaths:
-    - ./proto
-
-binaries:
-  default:
-    type: wasm/rust-v1
-    file: ./target/wasm32-unknown-unknown/release/substreams.wasm
-
-modules: # specify modules with their inputs and outputs.
-  - name: map_contract
-    kind: map
-    inputs:
-      - source: sf.ethereum.type.v2.Block
-    output:
-      type: proto:test.Contracts
-
-  - name: graph_out
-    kind: map
-    inputs:
-      - map: map_contract
-    output:
-      type: proto:substreams.entity.v1.EntityChanges # this type can be consumed by Graph Node
diff --git a/tests/src/fixture/mod.rs b/tests/src/fixture/mod.rs
index a965bab3c23..62000dc5e8e 100644
--- a/tests/src/fixture/mod.rs
+++ b/tests/src/fixture/mod.rs
@@ -1,5 +1,4 @@
 pub mod ethereum;
-pub mod substreams;
 
 use std::collections::{BTreeSet, HashMap};
 use std::marker::PhantomData;
@@ -47,7 +46,6 @@ use graph::prelude::{
     SubgraphName, SubgraphRegistrar, SubgraphStore as _, SubgraphVersionSwitchingMode,
     TriggerProcessor,
 };
-use graph::schema::InputSchema;
 use graph_chain_ethereum::chain::RuntimeAdapterBuilder;
 use graph_chain_ethereum::network::EthereumNetworkAdapters;
 use graph_chain_ethereum::Chain;
@@ -107,7 +105,6 @@ impl CommonChainConfig {
             false,
             SubgraphLimit::Unlimited,
             Arc::new(EndpointMetrics::mock()),
-            false,
         ))]);
 
         Self {
@@ -135,19 +132,6 @@ impl TestChainTrait<Chain> for TestChain {
     }
 }
 
-pub struct TestChainSubstreams {
-    pub chain: Arc<graph_chain_substreams::Chain>,
-    pub block_stream_builder: Arc<graph_chain_substreams::BlockStreamBuilder>,
-}
-
-impl TestChainTrait<graph_chain_substreams::Chain> for TestChainSubstreams {
-    fn set_block_stream(&self, _blocks: Vec<BlockWithTriggers<graph_chain_substreams::Chain>>) {}
-
-    fn chain(&self) -> Arc<graph_chain_substreams::Chain> {
-        self.chain.clone()
-    }
-}
-
 pub trait TestChainTrait<C: Blockchain> {
     fn set_block_stream(&self, blocks: Vec<BlockWithTriggers<C>>);
@@ -230,37 +214,6 @@ impl TestContext {
             .unwrap()
     }
 
-    pub async fn runner_substreams(
-        &self,
-        stop_block: BlockPtr,
-    ) -> graph_core::subgraph::SubgraphRunner<
-        graph_chain_substreams::Chain,
-        RuntimeHostBuilder<graph_chain_substreams::Chain>,
-    > {
-        let (logger, deployment, raw) = self.get_runner_context().await;
-        let tp: Box<dyn TriggerProcessor<_, _>> = Box::new(
-            graph_chain_substreams::TriggerProcessor::new(deployment.clone()),
-        );
-
-        let deployment_status_metric = self
-            .instance_manager
-            .new_deployment_status_metric(&deployment);
-
-        self.instance_manager
-            .build_subgraph_runner_inner(
-                logger,
-                self.env_vars.cheap_clone(),
-                deployment,
-                raw,
-                Some(stop_block.block_number()),
-                tp,
-                deployment_status_metric,
-                true,
-            )
-            .await
-            .unwrap()
-    }
-
     pub async fn get_runner_context(&self) -> (Logger, DeploymentLocator, serde_yaml::Mapping) {
         let logger = self.logger.cheap_clone();
         let deployment = self.deployment.cheap_clone();
@@ -407,6 +360,7 @@ fn test_logger(test_name: &str) -> Logger {
     graph::log::logger(true).new(o!("test" => test_name.to_string()))
 }
 
+#[allow(clippy::await_holding_lock)]
 pub async fn stores(test_name: &str, store_config_path: &str) -> Stores {
     let _mutex_guard = STORE_MUTEX.lock().unwrap();
 
@@ -793,18 +747,6 @@ impl<C: Blockchain> BlockStreamBuilder<C> for MutexBlockStreamBuilder {
             .await
     }
 
-    async fn build_substreams(
-        &self,
-        _chain: &C,
-        _schema: InputSchema,
-        _deployment: DeploymentLocator,
-        _block_cursor: FirehoseCursor,
-        _subgraph_current_block: Option<BlockPtr>,
-        _filter: Arc<C::TriggerFilter>,
-    ) -> anyhow::Result<Box<dyn BlockStream<C>>> {
-        unimplemented!();
-    }
-
     async fn build_polling(
         &self,
         chain: &C,
@@ -845,18 +787,6 @@ impl<C: Blockchain> BlockStreamBuilder<C> for StaticStreamBuilder<C>
 where
     C::TriggerData: Clone,
 {
-    async fn build_substreams(
-        &self,
-        _chain: &C,
-        _schema: InputSchema,
-        _deployment: DeploymentLocator,
-        _block_cursor: FirehoseCursor,
-        _subgraph_current_block: Option<BlockPtr>,
-        _filter: Arc<C::TriggerFilter>,
-    ) -> anyhow::Result<Box<dyn BlockStream<C>>> {
-        unimplemented!()
-    }
-
     async fn build_firehose(
         &self,
        _chain: &C,
diff --git a/tests/src/fixture/substreams.rs b/tests/src/fixture/substreams.rs
deleted file mode 100644
index f94fdfa95ec..00000000000
--- a/tests/src/fixture/substreams.rs
+++ /dev/null
@@ -1,34 +0,0 @@
-use std::sync::Arc;
-
-use graph::{blockchain::client::ChainClient, components::network_provider::ChainName};
-
-use super::{CommonChainConfig, Stores, TestChainSubstreams};
-
-pub async fn chain(test_name: &str, stores: &Stores) -> TestChainSubstreams {
-    let CommonChainConfig {
-        logger_factory,
-        mock_registry,
-        chain_store,
-        firehose_endpoints,
-        ..
-    } = CommonChainConfig::new(test_name, stores).await;
-
-    let block_stream_builder = Arc::new(graph_chain_substreams::BlockStreamBuilder::new());
-    let client = Arc::new(ChainClient::<graph_chain_substreams::Chain>::new_firehose(
-        firehose_endpoints,
-    ));
-
-    let chain = Arc::new(graph_chain_substreams::Chain::new(
-        logger_factory,
-        client,
-        mock_registry,
-        chain_store,
-        block_stream_builder.clone(),
-        ChainName::from("test-chain"),
-    ));
-
-    TestChainSubstreams {
-        chain,
-        block_stream_builder,
-    }
-}
diff --git a/tests/tests/runner_tests.rs b/tests/tests/runner_tests.rs
index 6e3b7d80b0c..f35df89ce2e 100644
--- a/tests/tests/runner_tests.rs
+++ b/tests/tests/runner_tests.rs
@@ -23,7 +23,6 @@ use graph_tests::fixture::ethereum::{
     push_test_polling_trigger,
 };
 
-use graph_tests::fixture::substreams::chain as substreams_chain;
 use graph_tests::fixture::{
     self, test_ptr, test_ptr_reorged, MockAdapterSelector, NoopAdapterSelector, TestChainTrait,
     TestContext, TestInfo,
@@ -438,37 +437,6 @@ async fn derived_loaders() {
     );
 }
 
-// This PR https://github.com/graphprotocol/graph-node/pull/4787
-// changed the way TriggerFilters were built
-// A bug was introduced in the PR which resulted in filters for substreams not being included
-// This test tests that the TriggerFilter is built correctly for substreams
-#[graph::test]
-async fn substreams_trigger_filter_construction() -> anyhow::Result<()> {
-    let RunnerTestRecipe { stores, test_info } =
-        RunnerTestRecipe::new("substreams", "substreams").await;
-
-    let chain = substreams_chain(&test_info.test_name, &stores).await;
-    let ctx = fixture::setup(&test_info, &stores, &chain, None, None).await;
-
-    let runner = ctx.runner_substreams(test_ptr(0)).await;
-    let filter = runner.build_filter_for_test();
-
-    assert_eq!(filter.chain_filter.module_name(), "graph_out");
-    assert_eq!(
-        filter
-            .chain_filter
-            .modules()
-            .as_ref()
-            .unwrap()
-            .modules
-            .len(),
-        2
-    );
-    assert_eq!(filter.chain_filter.start_block().unwrap(), 0);
-    assert_eq!(filter.chain_filter.data_sources_len(), 1);
-    Ok(())
-}
-
 #[graph::test]
 async fn end_block() -> anyhow::Result<()> {
     let RunnerTestRecipe { stores, test_info } =