diff --git a/.github/workflows/audit.yml b/.github/workflows/audit.yml index 102a8b53e78..5c5129c51a2 100644 --- a/.github/workflows/audit.yml +++ b/.github/workflows/audit.yml @@ -12,7 +12,7 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 10 steps: - - uses: actions/checkout@v2 - - uses: actions-rs/audit-check@v1 + - uses: actions/checkout@v4 + - uses: rustsec/audit-check@69366f33c96575abad1ee0dba8212993eecbe998 #v2.0.0 with: token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index b1a91b2098c..0fa6d58bbb7 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -42,7 +42,7 @@ jobs: # https://github.com/smorimoto/tune-github-hosted-runner-network/blob/main/action.yml run: sudo ethtool -K eth0 tx off rx off - name: Checkout sources - uses: actions/checkout@v2 + uses: actions/checkout@v4 # Don't use the rust-cache as it leads to 'no space left on device' errors # - uses: Swatinem/rust-cache@v2 - name: Install lld @@ -86,10 +86,10 @@ jobs: # https://github.com/smorimoto/tune-github-hosted-runner-network/blob/main/action.yml run: sudo ethtool -K eth0 tx off rx off - name: Checkout sources - uses: actions/checkout@v2 + uses: actions/checkout@v4 - name: Install Node 20 - uses: actions/setup-node@v3 + uses: actions/setup-node@v4 with: node-version: "20" cache: yarn @@ -137,11 +137,11 @@ jobs: # https://github.com/smorimoto/tune-github-hosted-runner-network/blob/main/action.yml run: sudo ethtool -K eth0 tx off rx off - name: Checkout sources - uses: actions/checkout@v2 + uses: actions/checkout@v4 - uses: Swatinem/rust-cache@v2 - name: Install Node 20 - uses: actions/setup-node@v3 + uses: actions/setup-node@v4 with: node-version: "20" cache: yarn @@ -183,7 +183,7 @@ jobs: env: RUSTFLAGS: "-D warnings" steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v4 - name: Check formatting uses: actions-rs/cargo@v1 @@ -196,7 +196,7 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 60 steps: - - uses: 
actions/checkout@v2 + - uses: actions/checkout@v4 # Unlike rustfmt, Clippy actually compiles stuff so it benefits from # caching. - uses: Swatinem/rust-cache@v2 @@ -214,7 +214,7 @@ jobs: env: RUSTFLAGS: "-D warnings" steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v4 - uses: Swatinem/rust-cache@v2 - name: Install dependencies run: | diff --git a/.github/workflows/code-coverage.yml b/.github/workflows/code-coverage.yml index 5ca14794035..0468f22ff08 100644 --- a/.github/workflows/code-coverage.yml +++ b/.github/workflows/code-coverage.yml @@ -39,10 +39,10 @@ jobs: ports: - 5432:5432 steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: Swatinem/rust-cache@v2 - name: Install Node 14 - uses: actions/setup-node@v3 + uses: actions/setup-node@v4 with: node-version: "14" cache: yarn @@ -62,12 +62,12 @@ jobs: - name: Generate code coverage run: cargo llvm-cov --package graph-tests --lcov --output-path lcov.info -- --nocapture - - uses: actions/upload-artifact@v3 + - uses: actions/upload-artifact@v4 with: name: code-coverage-info path: lcov.info - name: Upload coverage to Codecov - uses: codecov/codecov-action@v3 + uses: codecov/codecov-action@v5 with: # No token needed, because the repo is public. files: lcov.info diff --git a/Cargo.lock b/Cargo.lock index 17573acd02c..b2ae24e0169 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1,6 +1,6 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. 
-version = 3 +version = 4 [[package]] name = "Inflector" @@ -2073,6 +2073,7 @@ dependencies = [ "never", "parity-wasm", "semver", + "serde_yaml", "uuid", "wasm-instrument", "wasmtime", diff --git a/NEWS.md b/NEWS.md index ab316821488..a8843422fde 100644 --- a/NEWS.md +++ b/NEWS.md @@ -1,5 +1,37 @@ # NEWS +## v0.38.0 + +### What's new + +- A new `deployment_synced` metric is added [(#5816)](https://github.com/graphprotocol/graph-node/pull/5816) + that indicates whether a deployment has reached the chain head since it was deployed. + + **Possible values for the metric:** + - `0` - means that the deployment is not synced; + - `1` - means that the deployment is synced; + + _If a deployment is not running, the metric reports no value for that deployment._ + +## v0.37.0 + +### What's new + +- A new `deployment_status` metric is added [(#5720)](https://github.com/graphprotocol/graph-node/pull/5720) with the + following behavior: + - Once graph-node has figured out that it should index a deployment, `deployment_status` is set to `1` _(starting)_; + - When the block stream is created and blocks are ready to be processed, `deployment_status` is set to `2` _( + running)_; + - When a deployment is unassigned, `deployment_status` is set to `3` _(stopped)_; + - If a temporary or permanent failure occurs, `deployment_status` is set to `4` _(failed)_; + - If indexing manages to recover from a temporary failure, the `deployment_status` is set back to `2` _( + running)_; + +### Breaking changes + +- The `deployment_failed` metric is removed and the failures are reported by the new `deployment_status` + metric. 
[(#5720)](https://github.com/graphprotocol/graph-node/pull/5720) + ## v0.36.0 ### Note on Firehose Extended Block Details diff --git a/chain/arweave/src/chain.rs b/chain/arweave/src/chain.rs index f49611ddf93..40f2538a400 100644 --- a/chain/arweave/src/chain.rs +++ b/chain/arweave/src/chain.rs @@ -3,11 +3,11 @@ use graph::blockchain::client::ChainClient; use graph::blockchain::firehose_block_ingestor::FirehoseBlockIngestor; use graph::blockchain::{ BasicBlockchainBuilder, Block, BlockIngestor, BlockchainBuilder, BlockchainKind, - EmptyNodeCapabilities, NoopDecoderHook, NoopRuntimeAdapter, + EmptyNodeCapabilities, NoopDecoderHook, NoopRuntimeAdapter, TriggerFilterWrapper, }; use graph::cheap_clone::CheapClone; use graph::components::network_provider::ChainName; -use graph::components::store::DeploymentCursorTracker; +use graph::components::store::{DeploymentCursorTracker, SourceableStore}; use graph::data::subgraph::UnifiedMappingApiVersion; use graph::env::EnvVars; use graph::firehose::FirehoseEndpoint; @@ -27,11 +27,13 @@ use graph::{ prelude::{async_trait, o, BlockNumber, ChainStore, Error, Logger, LoggerFactory}, }; use prost::Message; +use std::collections::BTreeSet; use std::sync::Arc; use crate::adapter::TriggerFilter; use crate::data_source::{DataSourceTemplate, UnresolvedDataSourceTemplate}; use crate::trigger::{self, ArweaveTrigger}; +use crate::Block as ArweaveBlock; use crate::{ codec, data_source::{DataSource, UnresolvedDataSource}, @@ -119,7 +121,8 @@ impl Blockchain for Chain { deployment: DeploymentLocator, store: impl DeploymentCursorTracker, start_blocks: Vec, - filter: Arc, + _source_subgraph_stores: Vec>, + filter: Arc>, unified_api_version: UnifiedMappingApiVersion, ) -> Result>, Error> { let adapter = self @@ -135,7 +138,10 @@ impl Blockchain for Chain { .subgraph_logger(&deployment) .new(o!("component" => "FirehoseBlockStream")); - let firehose_mapper = Arc::new(FirehoseMapper { adapter, filter }); + let firehose_mapper = 
Arc::new(FirehoseMapper { + adapter, + filter: filter.chain_filter.clone(), + }); Ok(Box::new(FirehoseBlockStream::new( deployment.hash, @@ -199,6 +205,10 @@ impl TriggersAdapterTrait for TriggersAdapter { panic!("Should never be called since not used by FirehoseBlockStream") } + async fn chain_head_ptr(&self) -> Result, Error> { + unimplemented!() + } + async fn triggers_in_block( &self, logger: &Logger, @@ -258,6 +268,14 @@ impl TriggersAdapterTrait for TriggersAdapter { number: block.number.saturating_sub(1), })) } + + async fn load_block_ptrs_by_numbers( + &self, + _logger: Logger, + _block_numbers: BTreeSet, + ) -> Result, Error> { + todo!() + } } pub struct FirehoseMapper { diff --git a/chain/cosmos/src/chain.rs b/chain/cosmos/src/chain.rs index bd6b66e55c6..353b1e4dbbe 100644 --- a/chain/cosmos/src/chain.rs +++ b/chain/cosmos/src/chain.rs @@ -1,9 +1,10 @@ use graph::blockchain::firehose_block_ingestor::FirehoseBlockIngestor; -use graph::blockchain::{BlockIngestor, NoopDecoderHook}; +use graph::blockchain::{BlockIngestor, NoopDecoderHook, TriggerFilterWrapper}; use graph::components::network_provider::ChainName; use graph::env::EnvVars; use graph::prelude::MetricsRegistry; use graph::substreams::Clock; +use std::collections::BTreeSet; use std::convert::TryFrom; use std::sync::Arc; @@ -11,7 +12,7 @@ use graph::blockchain::block_stream::{BlockStreamError, BlockStreamMapper, Fireh use graph::blockchain::client::ChainClient; use graph::blockchain::{BasicBlockchainBuilder, BlockchainBuilder, NoopRuntimeAdapter}; use graph::cheap_clone::CheapClone; -use graph::components::store::DeploymentCursorTracker; +use graph::components::store::{DeploymentCursorTracker, SourceableStore}; use graph::data::subgraph::UnifiedMappingApiVersion; use graph::{ blockchain::{ @@ -33,7 +34,7 @@ use crate::data_source::{ DataSource, DataSourceTemplate, EventOrigin, UnresolvedDataSource, UnresolvedDataSourceTemplate, }; use crate::trigger::CosmosTrigger; -use crate::{codec, 
TriggerFilter}; +use crate::{codec, Block, TriggerFilter}; pub struct Chain { logger_factory: LoggerFactory, @@ -113,7 +114,8 @@ impl Blockchain for Chain { deployment: DeploymentLocator, store: impl DeploymentCursorTracker, start_blocks: Vec, - filter: Arc, + _source_subgraph_stores: Vec>, + filter: Arc>, unified_api_version: UnifiedMappingApiVersion, ) -> Result>, Error> { let adapter = self @@ -129,7 +131,10 @@ impl Blockchain for Chain { .subgraph_logger(&deployment) .new(o!("component" => "FirehoseBlockStream")); - let firehose_mapper = Arc::new(FirehoseMapper { adapter, filter }); + let firehose_mapper = Arc::new(FirehoseMapper { + adapter, + filter: filter.chain_filter.clone(), + }); Ok(Box::new(FirehoseBlockStream::new( deployment.hash, @@ -192,6 +197,16 @@ impl TriggersAdapterTrait for TriggersAdapter { ) -> Result, Error> { panic!("Should never be called since not used by FirehoseBlockStream") } + async fn load_block_ptrs_by_numbers( + &self, + _logger: Logger, + _block_numbers: BTreeSet, + ) -> Result, Error> { + todo!() + } + async fn chain_head_ptr(&self) -> Result, Error> { + unimplemented!() + } async fn scan_triggers( &self, @@ -467,9 +482,12 @@ impl FirehoseMapperTrait for FirehoseMapper { #[cfg(test)] mod test { - use graph::prelude::{ - slog::{o, Discard, Logger}, - tokio, + use graph::{ + blockchain::Trigger, + prelude::{ + slog::{o, Discard, Logger}, + tokio, + }, }; use super::*; @@ -600,7 +618,10 @@ mod test { // they may not be in the same order for trigger in expected_triggers { assert!( - triggers.trigger_data.contains(&trigger), + triggers.trigger_data.iter().any(|t| match t { + Trigger::Chain(t) => t == &trigger, + _ => false, + }), "Expected trigger list to contain {:?}, but it only contains: {:?}", trigger, triggers.trigger_data diff --git a/chain/ethereum/src/adapter.rs b/chain/ethereum/src/adapter.rs index f78ff1b0bec..469e8932b5e 100644 --- a/chain/ethereum/src/adapter.rs +++ b/chain/ethereum/src/adapter.rs @@ -1,8 +1,9 @@ use 
anyhow::Error; -use ethabi::{Error as ABIError, Function, ParamType, Token}; +use ethabi::{Error as ABIError, ParamType, Token}; use graph::blockchain::ChainIdentifier; use graph::components::subgraph::MappingError; use graph::data::store::ethereum::call; +use graph::data_source::common::ContractCall; use graph::firehose::CallToFilter; use graph::firehose::CombinedFilter; use graph::firehose::LogFilter; @@ -93,16 +94,6 @@ impl EventSignatureWithTopics { } } -#[derive(Clone, Debug)] -pub struct ContractCall { - pub contract_name: String, - pub address: Address, - pub block_ptr: BlockPtr, - pub function: Function, - pub args: Vec, - pub gas: Option, -} - #[derive(Error, Debug)] pub enum EthereumRpcError { #[error("call error: {0}")] diff --git a/chain/ethereum/src/chain.rs b/chain/ethereum/src/chain.rs index cf46a675212..f632ee36d93 100644 --- a/chain/ethereum/src/chain.rs +++ b/chain/ethereum/src/chain.rs @@ -3,18 +3,21 @@ use anyhow::{Context, Error}; use graph::blockchain::client::ChainClient; use graph::blockchain::firehose_block_ingestor::{FirehoseBlockIngestor, Transforms}; use graph::blockchain::{ - BlockIngestor, BlockTime, BlockchainKind, ChainIdentifier, TriggersAdapterSelector, + BlockIngestor, BlockTime, BlockchainKind, ChainIdentifier, ExtendedBlockPtr, + TriggerFilterWrapper, TriggersAdapterSelector, }; use graph::components::network_provider::ChainName; -use graph::components::store::DeploymentCursorTracker; +use graph::components::store::{DeploymentCursorTracker, SourceableStore}; use graph::data::subgraph::UnifiedMappingApiVersion; use graph::firehose::{FirehoseEndpoint, ForkStep}; use graph::futures03::compat::Future01CompatExt; +use graph::futures03::TryStreamExt; use graph::prelude::{ - BlockHash, ComponentLoggerConfig, ElasticComponentLoggerConfig, EthereumBlock, + retry, BlockHash, ComponentLoggerConfig, ElasticComponentLoggerConfig, EthereumBlock, EthereumCallCache, LightEthereumBlock, LightEthereumBlockExt, MetricsRegistry, }; use 
graph::schema::InputSchema; +use graph::slog::{debug, error, trace}; use graph::substreams::Clock; use graph::{ blockchain::{ @@ -36,7 +39,8 @@ use graph::{ }, }; use prost::Message; -use std::collections::HashSet; +use std::collections::{BTreeSet, HashSet}; +use std::future::Future; use std::iter::FromIterator; use std::sync::Arc; use std::time::Duration; @@ -61,6 +65,7 @@ use crate::{BufferedCallCache, NodeCapabilities}; use crate::{EthereumAdapter, RuntimeAdapter}; use graph::blockchain::block_stream::{ BlockStream, BlockStreamBuilder, BlockStreamError, BlockStreamMapper, FirehoseCursor, + TriggersAdapterWrapper, }; /// Celo Mainnet: 42220, Testnet Alfajores: 44787, Testnet Baklava: 62320 @@ -121,24 +126,51 @@ impl BlockStreamBuilder for EthereumStreamBuilder { unimplemented!() } + async fn build_subgraph_block_stream( + &self, + chain: &Chain, + deployment: DeploymentLocator, + start_blocks: Vec, + source_subgraph_stores: Vec>, + subgraph_current_block: Option, + filter: Arc>, + unified_api_version: UnifiedMappingApiVersion, + ) -> Result>> { + self.build_polling( + chain, + deployment, + start_blocks, + source_subgraph_stores, + subgraph_current_block, + filter, + unified_api_version, + ) + .await + } + async fn build_polling( &self, chain: &Chain, deployment: DeploymentLocator, start_blocks: Vec, + source_subgraph_stores: Vec>, subgraph_current_block: Option, - filter: Arc<::TriggerFilter>, + filter: Arc>, unified_api_version: UnifiedMappingApiVersion, ) -> Result>> { - let requirements = filter.node_capabilities(); - let adapter = chain - .triggers_adapter(&deployment, &requirements, unified_api_version.clone()) - .unwrap_or_else(|_| { - panic!( - "no adapter for network {} with capabilities {}", - chain.name, requirements - ) - }); + let requirements = filter.chain_filter.node_capabilities(); + let is_using_subgraph_composition = !source_subgraph_stores.is_empty(); + let adapter = TriggersAdapterWrapper::new( + chain + .triggers_adapter(&deployment, 
&requirements, unified_api_version.clone()) + .unwrap_or_else(|_| { + panic!( + "no adapter for network {} with capabilities {}", + chain.name, requirements + ) + }), + source_subgraph_stores, + ); let logger = chain .logger_factory @@ -153,33 +185,45 @@ impl BlockStreamBuilder for EthereumStreamBuilder { // This is ok because Celo blocks are always final. And we _need_ to do this because // some events appear only in eth_getLogs but not in transaction receipts. // See also ca0edc58-0ec5-4c89-a7dd-2241797f5e50. - let chain_id = match chain.chain_client().as_ref() { + let reorg_threshold = match chain.chain_client().as_ref() { ChainClient::Rpc(adapter) => { - adapter + let chain_id = adapter .cheapest() .await .ok_or(anyhow!("unable to get eth adapter for chan_id call"))? .chain_id() - .await? + .await?; + + if CELO_CHAIN_IDS.contains(&chain_id) { + 0 + } else { + chain.reorg_threshold + } } - _ => panic!("expected rpc when using polling blockstream"), + _ if is_using_subgraph_composition => chain.reorg_threshold, + _ => panic!( + "expected rpc when using polling blockstream : {}", + is_using_subgraph_composition + ), }; - let reorg_threshold = match CELO_CHAIN_IDS.contains(&chain_id) { - false => chain.reorg_threshold, - true => 0, + + let max_block_range_size = if is_using_subgraph_composition { + ENV_VARS.max_block_range_size * 10 + } else { + ENV_VARS.max_block_range_size }; Ok(Box::new(PollingBlockStream::new( chain_store, chain_head_update_stream, - adapter, + Arc::new(adapter), chain.node_id.clone(), deployment.hash, filter, start_blocks, reorg_threshold, logger, - ENV_VARS.max_block_range_size, + max_block_range_size, ENV_VARS.target_triggers_per_block_range, unified_api_version, subgraph_current_block, @@ -201,7 +245,7 @@ impl BlockRefetcher for EthereumBlockRefetcher { logger: &Logger, cursor: FirehoseCursor, ) -> Result { - let endpoint = chain.chain_client().firehose_endpoint().await?; + let endpoint: Arc = 
chain.chain_client().firehose_endpoint().await?; let block = endpoint.get_block::(cursor, logger).await?; let ethereum_block: EthereumBlockWithCalls = (&block).try_into()?; Ok(BlockFinality::NonFinal(ethereum_block)) @@ -409,10 +453,27 @@ impl Blockchain for Chain { deployment: DeploymentLocator, store: impl DeploymentCursorTracker, start_blocks: Vec, - filter: Arc, + source_subgraph_stores: Vec>, + filter: Arc>, unified_api_version: UnifiedMappingApiVersion, ) -> Result>, Error> { let current_ptr = store.block_ptr(); + + if !filter.subgraph_filter.is_empty() { + return self + .block_stream_builder + .build_subgraph_block_stream( + self, + deployment, + start_blocks, + source_subgraph_stores, + current_ptr, + filter, + unified_api_version, + ) + .await; + } + match self.chain_client().as_ref() { ChainClient::Rpc(_) => { self.block_stream_builder @@ -420,6 +481,7 @@ impl Blockchain for Chain { self, deployment, start_blocks, + source_subgraph_stores, current_ptr, filter, unified_api_version, @@ -434,7 +496,7 @@ impl Blockchain for Chain { store.firehose_cursor(), start_blocks, current_ptr, - filter, + filter.chain_filter.clone(), unified_api_version, ) .await @@ -571,6 +633,8 @@ pub enum BlockFinality { // If a block may still be reorged, we need to work with more local data. 
NonFinal(EthereumBlockWithCalls), + + Ptr(Arc), } impl Default for BlockFinality { @@ -584,6 +648,7 @@ impl BlockFinality { match self { BlockFinality::Final(block) => block, BlockFinality::NonFinal(block) => &block.ethereum_block.block, + BlockFinality::Ptr(_) => unreachable!("light_block called on HeaderOnly"), } } } @@ -593,6 +658,7 @@ impl<'a> From<&'a BlockFinality> for BlockPtr { match block { BlockFinality::Final(b) => BlockPtr::from(&**b), BlockFinality::NonFinal(b) => BlockPtr::from(&b.ethereum_block), + BlockFinality::Ptr(b) => BlockPtr::new(b.hash.clone(), b.number), } } } @@ -602,6 +668,7 @@ impl Block for BlockFinality { match self { BlockFinality::Final(block) => block.block_ptr(), BlockFinality::NonFinal(block) => block.ethereum_block.block.block_ptr(), + BlockFinality::Ptr(block) => BlockPtr::new(block.hash.clone(), block.number), } } @@ -609,6 +676,9 @@ impl Block for BlockFinality { match self { BlockFinality::Final(block) => block.parent_ptr(), BlockFinality::NonFinal(block) => block.ethereum_block.block.parent_ptr(), + BlockFinality::Ptr(block) => { + Some(BlockPtr::new(block.parent_hash.clone(), block.number - 1)) + } } } @@ -641,16 +711,22 @@ impl Block for BlockFinality { json::to_value(eth_block) } BlockFinality::NonFinal(block) => json::to_value(&block.ethereum_block), + BlockFinality::Ptr(_) => Ok(json::Value::Null), } } fn timestamp(&self) -> BlockTime { - let ts = match self { - BlockFinality::Final(block) => block.timestamp, - BlockFinality::NonFinal(block) => block.ethereum_block.block.timestamp, - }; - let ts = i64::try_from(ts.as_u64()).unwrap(); - BlockTime::since_epoch(ts, 0) + match self { + BlockFinality::Final(block) => { + let ts = i64::try_from(block.timestamp.as_u64()).unwrap(); + BlockTime::since_epoch(ts, 0) + } + BlockFinality::NonFinal(block) => { + let ts = i64::try_from(block.ethereum_block.block.timestamp.as_u64()).unwrap(); + BlockTime::since_epoch(ts, 0) + } + BlockFinality::Ptr(block) => block.timestamp, + } } } @@ 
-665,6 +741,81 @@ pub struct TriggersAdapter { unified_api_version: UnifiedMappingApiVersion, } +/// Fetches blocks from the cache based on block numbers, excluding duplicates +/// (i.e., multiple blocks for the same number), and identifying missing blocks that +/// need to be fetched via RPC/Firehose. Returns a tuple of the found blocks and the missing block numbers. +async fn fetch_unique_blocks_from_cache( + logger: &Logger, + chain_store: Arc, + block_numbers: BTreeSet, +) -> (Vec>, Vec) { + // Load blocks from the cache + let blocks_map = chain_store + .cheap_clone() + .block_ptrs_by_numbers(block_numbers.iter().map(|&b| b.into()).collect::>()) + .await + .map_err(|e| { + error!(logger, "Error accessing block cache {}", e); + e + }) + .unwrap_or_default(); + + // Collect blocks and filter out ones with multiple entries + let blocks: Vec> = blocks_map + .into_iter() + .filter_map(|(_, values)| { + if values.len() == 1 { + Some(Arc::new(values[0].clone())) + } else { + None + } + }) + .collect(); + + // Identify missing blocks + let missing_blocks: Vec = block_numbers + .into_iter() + .filter(|&number| !blocks.iter().any(|block| block.block_number() == number)) + .collect(); + + if !missing_blocks.is_empty() { + debug!( + logger, + "Loading {} block(s) not in the block cache", + missing_blocks.len() + ); + debug!(logger, "Missing blocks {:?}", missing_blocks); + } + + (blocks, missing_blocks) +} + +/// Fetches blocks by their numbers, first attempting to load from cache. +/// Missing blocks are retrieved from an external source, with all blocks sorted and converted to `BlockFinality` format. 
+async fn load_blocks( + logger: &Logger, + chain_store: Arc, + block_numbers: BTreeSet, + fetch_missing: F, +) -> Result> +where + F: FnOnce(Vec) -> Fut, + Fut: Future>>>, +{ + // Fetch cached blocks and identify missing ones + let (mut cached_blocks, missing_block_numbers) = + fetch_unique_blocks_from_cache(logger, chain_store, block_numbers).await; + + // Fetch missing blocks if any + if !missing_block_numbers.is_empty() { + let missing_blocks = fetch_missing(missing_block_numbers).await?; + cached_blocks.extend(missing_blocks); + cached_blocks.sort_by_key(|block| block.number); + } + + Ok(cached_blocks.into_iter().map(BlockFinality::Ptr).collect()) +} + #[async_trait] impl TriggersAdapterTrait for TriggersAdapter { async fn scan_triggers( @@ -689,6 +840,82 @@ impl TriggersAdapterTrait for TriggersAdapter { .await } + async fn load_block_ptrs_by_numbers( + &self, + logger: Logger, + block_numbers: BTreeSet, + ) -> Result> { + match &*self.chain_client { + ChainClient::Firehose(endpoints) => { + trace!( + logger, + "Loading blocks from firehose"; + "block_numbers" => format!("{:?}", block_numbers) + ); + + let endpoint = endpoints.endpoint().await?; + let chain_store = self.chain_store.clone(); + let logger_clone = logger.clone(); + + load_blocks( + &logger, + chain_store, + block_numbers, + |missing_numbers| async move { + let blocks = endpoint + .load_blocks_by_numbers::( + missing_numbers.iter().map(|&n| n as u64).collect(), + &logger_clone, + ) + .await? 
+ .into_iter() + .map(|block| { + Arc::new(ExtendedBlockPtr { + hash: block.hash(), + number: block.number(), + parent_hash: block.parent_hash().unwrap_or_default(), + timestamp: block.timestamp(), + }) + }) + .collect::>(); + Ok(blocks) + }, + ) + .await + } + + ChainClient::Rpc(client) => { + trace!( + logger, + "Loading blocks from RPC"; + "block_numbers" => format!("{:?}", block_numbers) + ); + + let adapter = client.cheapest_with(&self.capabilities).await?; + let chain_store = self.chain_store.clone(); + let logger_clone = logger.clone(); + + load_blocks( + &logger, + chain_store, + block_numbers, + |missing_numbers| async move { + adapter + .load_block_ptrs_by_numbers_rpc(logger_clone, missing_numbers) + .try_collect() + .await + }, + ) + .await + } + } + } + + async fn chain_head_ptr(&self) -> Result, Error> { + let chain_store = self.chain_store.clone(); + chain_store.chain_head_ptr().await + } + async fn triggers_in_block( &self, logger: &Logger, @@ -737,17 +964,30 @@ impl TriggersAdapterTrait for TriggersAdapter { triggers.append(&mut parse_block_triggers(&filter.block, full_block)); Ok(BlockWithTriggers::new(block, triggers, logger)) } + BlockFinality::Ptr(_) => unreachable!("triggers_in_block called on HeaderOnly"), } } async fn is_on_main_chain(&self, ptr: BlockPtr) -> Result { - self.chain_client - .rpc()? - .cheapest() - .await - .ok_or(anyhow!("unable to get adapter for is_on_main_chain"))? 
- .is_on_main_chain(&self.logger, ptr.clone()) - .await + match &*self.chain_client { + ChainClient::Firehose(endpoints) => { + let endpoint = endpoints.endpoint().await?; + let block = endpoint + .get_block_by_number::(ptr.number as u64, &self.logger) + .await + .map_err(|e| anyhow!("Failed to fetch block from firehose: {}", e))?; + + Ok(block.hash() == ptr.hash) + } + ChainClient::Rpc(adapter) => { + let adapter = adapter + .cheapest() + .await + .ok_or_else(|| anyhow!("unable to get adapter for is_on_main_chain"))?; + + adapter.is_on_main_chain(&self.logger, ptr).await + } + } } async fn ancestor_block( @@ -777,10 +1017,44 @@ impl TriggersAdapterTrait for TriggersAdapter { use graph::prelude::LightEthereumBlockExt; let block = match self.chain_client.as_ref() { - ChainClient::Firehose(_) => Some(BlockPtr { - hash: BlockHash::from(vec![0xff; 32]), - number: block.number.saturating_sub(1), - }), + ChainClient::Firehose(endpoints) => { + let chain_store = self.chain_store.cheap_clone(); + // First try to get the block from the store + if let Ok(blocks) = chain_store.blocks(vec![block.hash.clone()]).await { + if let Some(block) = blocks.first() { + if let Ok(block) = json::from_value::(block.clone()) { + return Ok(block.parent_ptr()); + } + } + } + + // If not in store, fetch from Firehose + let endpoint = endpoints.endpoint().await?; + let logger = self.logger.clone(); + let retry_log_message = + format!("get_block_by_ptr for block {} with firehose", block); + let block = block.clone(); + + retry(retry_log_message, &logger) + .limit(ENV_VARS.request_retries) + .timeout_secs(ENV_VARS.json_rpc_timeout.as_secs()) + .run(move || { + let endpoint = endpoint.cheap_clone(); + let logger = logger.cheap_clone(); + let block = block.clone(); + async move { + endpoint + .get_block_by_ptr::(&block, &logger) + .await + .context(format!( + "Failed to fetch block by ptr {} from firehose", + block + )) + } + }) + .await? 
+ .parent_ptr() + } ChainClient::Rpc(adapters) => { let blocks = adapters .cheapest_with(&self.capabilities) @@ -945,3 +1219,137 @@ impl FirehoseMapperTrait for FirehoseMapper { .await } } + +#[cfg(test)] +mod tests { + use graph::blockchain::mock::MockChainStore; + use graph::{slog, tokio}; + + use super::*; + use std::sync::Arc; + + // Helper function to create test blocks + fn create_test_block(number: BlockNumber, hash: &str) -> ExtendedBlockPtr { + let hash = BlockHash(hash.as_bytes().to_vec().into_boxed_slice()); + let ptr = BlockPtr::new(hash.clone(), number); + ExtendedBlockPtr { + hash, + number, + parent_hash: BlockHash(vec![0; 32].into_boxed_slice()), + timestamp: BlockTime::for_test(&ptr), + } + } + + #[tokio::test] + async fn test_fetch_unique_blocks_single_block() { + let logger = Logger::root(slog::Discard, o!()); + let mut chain_store = MockChainStore::default(); + + // Add a single block + let block = create_test_block(1, "block1"); + chain_store.blocks.insert(1, vec![block.clone()]); + + let block_numbers: BTreeSet<_> = vec![1].into_iter().collect(); + + let (blocks, missing) = + fetch_unique_blocks_from_cache(&logger, Arc::new(chain_store), block_numbers).await; + + assert_eq!(blocks.len(), 1); + assert_eq!(blocks[0].number, 1); + assert!(missing.is_empty()); + } + + #[tokio::test] + async fn test_fetch_unique_blocks_duplicate_blocks() { + let logger = Logger::root(slog::Discard, o!()); + let mut chain_store = MockChainStore::default(); + + // Add multiple blocks for the same number + let block1 = create_test_block(1, "block1a"); + let block2 = create_test_block(1, "block1b"); + chain_store + .blocks + .insert(1, vec![block1.clone(), block2.clone()]); + + let block_numbers: BTreeSet<_> = vec![1].into_iter().collect(); + + let (blocks, missing) = + fetch_unique_blocks_from_cache(&logger, Arc::new(chain_store), block_numbers).await; + + // Should filter out the duplicate block + assert!(blocks.is_empty()); + assert_eq!(missing, vec![1]); + 
assert_eq!(missing[0], 1); + } + + #[tokio::test] + async fn test_fetch_unique_blocks_missing_blocks() { + let logger = Logger::root(slog::Discard, o!()); + let mut chain_store = MockChainStore::default(); + + // Add block number 1 but not 2 + let block = create_test_block(1, "block1"); + chain_store.blocks.insert(1, vec![block.clone()]); + + let block_numbers: BTreeSet<_> = vec![1, 2].into_iter().collect(); + + let (blocks, missing) = + fetch_unique_blocks_from_cache(&logger, Arc::new(chain_store), block_numbers).await; + + assert_eq!(blocks.len(), 1); + assert_eq!(blocks[0].number, 1); + assert_eq!(missing, vec![2]); + } + + #[tokio::test] + async fn test_fetch_unique_blocks_multiple_valid_blocks() { + let logger = Logger::root(slog::Discard, o!()); + let mut chain_store = MockChainStore::default(); + + // Add multiple valid blocks + let block1 = create_test_block(1, "block1"); + let block2 = create_test_block(2, "block2"); + chain_store.blocks.insert(1, vec![block1.clone()]); + chain_store.blocks.insert(2, vec![block2.clone()]); + + let block_numbers: BTreeSet<_> = vec![1, 2].into_iter().collect(); + + let (blocks, missing) = + fetch_unique_blocks_from_cache(&logger, Arc::new(chain_store), block_numbers).await; + + assert_eq!(blocks.len(), 2); + assert!(blocks.iter().any(|b| b.number == 1)); + assert!(blocks.iter().any(|b| b.number == 2)); + assert!(missing.is_empty()); + } + + #[tokio::test] + async fn test_fetch_unique_blocks_mixed_scenario() { + let logger = Logger::root(slog::Discard, o!()); + let mut chain_store = MockChainStore::default(); + + // Add a mix of scenarios: + // - Block 1: Single valid block + // - Block 2: Multiple blocks (duplicate) + // - Block 3: Missing + let block1 = create_test_block(1, "block1"); + let block2a = create_test_block(2, "block2a"); + let block2b = create_test_block(2, "block2b"); + + chain_store.blocks.insert(1, vec![block1.clone()]); + chain_store + .blocks + .insert(2, vec![block2a.clone(), block2b.clone()]); + + let 
block_numbers: BTreeSet<_> = vec![1, 2, 3].into_iter().collect(); + + let (blocks, missing) = + fetch_unique_blocks_from_cache(&logger, Arc::new(chain_store), block_numbers).await; + + assert_eq!(blocks.len(), 1); + assert_eq!(blocks[0].number, 1); + assert_eq!(missing.len(), 2); + assert!(missing.contains(&2)); + assert!(missing.contains(&3)); + } +} diff --git a/chain/ethereum/src/data_source.rs b/chain/ethereum/src/data_source.rs index c0253d2e60e..a2da3e6cb4e 100644 --- a/chain/ethereum/src/data_source.rs +++ b/chain/ethereum/src/data_source.rs @@ -5,20 +5,19 @@ use graph::components::metrics::subgraph::SubgraphInstanceMetrics; use graph::components::store::{EthereumCallCache, StoredDynamicDataSource}; use graph::components::subgraph::{HostMetrics, InstanceDSTemplateInfo, MappingError}; use graph::components::trigger_processor::RunnableTriggers; -use graph::data::value::Word; -use graph::data_source::CausalityRegion; +use graph::data_source::common::{ + CallDecls, DeclaredCall, FindMappingABI, MappingABI, UnresolvedMappingABI, +}; +use graph::data_source::{CausalityRegion, MappingTrigger as MappingTriggerType}; use graph::env::ENV_VARS; use graph::futures03::future::try_join; use graph::futures03::stream::FuturesOrdered; use graph::futures03::TryStreamExt; use graph::prelude::ethabi::ethereum_types::H160; -use graph::prelude::ethabi::{StateMutability, Token}; -use graph::prelude::lazy_static; -use graph::prelude::regex::Regex; +use graph::prelude::ethabi::StateMutability; use graph::prelude::{Link, SubgraphManifestValidationError}; use graph::slog::{debug, error, o, trace}; use itertools::Itertools; -use serde::de; use serde::de::Error as ErrorD; use serde::{Deserialize, Deserializer}; use std::collections::HashSet; @@ -30,10 +29,9 @@ use tiny_keccak::{keccak256, Keccak}; use graph::{ blockchain::{self, Blockchain}, - derive::CheapClone, prelude::{ async_trait, - ethabi::{Address, Contract, Event, Function, LogParam, ParamType, RawLog}, + ethabi::{Address, 
Event, Function, LogParam, ParamType, RawLog}, serde_json, warn, web3::types::{Log, Transaction, H256}, BlockNumber, CheapClone, EthereumCall, LightEthereumBlock, LightEthereumBlockExt, @@ -50,7 +48,7 @@ use crate::adapter::EthereumAdapter as _; use crate::chain::Chain; use crate::network::EthereumNetworkAdapters; use crate::trigger::{EthereumBlockTriggerType, EthereumTrigger, MappingTrigger}; -use crate::{ContractCall, NodeCapabilities}; +use crate::NodeCapabilities; // The recommended kind is `ethereum`, `ethereum/contract` is accepted for backwards compatibility. const ETHEREUM_KINDS: &[&str] = &["ethereum/contract", "ethereum"]; @@ -802,7 +800,12 @@ impl DataSource { "transaction" => format!("{}", &transaction.hash), }); let handler = event_handler.handler.clone(); - let calls = DeclaredCall::new(&self.mapping, &event_handler, &log, ¶ms)?; + let calls = DeclaredCall::from_log_trigger( + &self.mapping, + &event_handler.calls, + &log, + ¶ms, + )?; Ok(Some(TriggerWithHandler::::new_with_logging_extras( MappingTrigger::Log { block: block.cheap_clone(), @@ -933,73 +936,6 @@ impl DataSource { } } -#[derive(Clone, Debug, PartialEq)] -pub struct DeclaredCall { - /// The user-supplied label from the manifest - label: String, - contract_name: String, - address: Address, - function: Function, - args: Vec, -} - -impl DeclaredCall { - fn new( - mapping: &Mapping, - handler: &MappingEventHandler, - log: &Log, - params: &[LogParam], - ) -> Result, anyhow::Error> { - let mut calls = Vec::new(); - for decl in handler.calls.decls.iter() { - let contract_name = decl.expr.abi.to_string(); - let function_name = decl.expr.func.as_str(); - // Obtain the path to the contract ABI - let abi = mapping.find_abi(&contract_name)?; - // TODO: Handle overloaded functions - let function = { - // Behavior for apiVersion < 0.0.4: look up function by name; for overloaded - // functions this always picks the same overloaded variant, which is incorrect - // and may lead to encoding/decoding errors 
- abi.contract.function(function_name).with_context(|| { - format!( - "Unknown function \"{}::{}\" called from WASM runtime", - contract_name, function_name - ) - })? - }; - - let address = decl.address(log, params)?; - let args = decl.args(log, params)?; - - let call = DeclaredCall { - label: decl.label.clone(), - contract_name, - address, - function: function.clone(), - args, - }; - calls.push(call); - } - - Ok(calls) - } - - fn as_eth_call(self, block_ptr: BlockPtr, gas: Option) -> (ContractCall, String) { - ( - ContractCall { - contract_name: self.contract_name, - address: self.address, - block_ptr, - function: self.function, - args: self.args, - gas, - }, - self.label, - ) - } -} - pub struct DecoderHook { eth_adapters: Arc, call_cache: Arc, @@ -1098,6 +1034,115 @@ impl DecoderHook { .collect(); Ok(labels) } + + fn collect_declared_calls<'a>( + &self, + runnables: &Vec>, + ) -> Vec<(Arc, DeclaredCall)> { + // Extract all hosted triggers from runnables + let all_triggers = runnables + .iter() + .flat_map(|runnable| &runnable.hosted_triggers); + + // Collect calls from both onchain and subgraph triggers + let mut all_calls = Vec::new(); + + for trigger in all_triggers { + let host_metrics = trigger.host.host_metrics(); + + match &trigger.mapping_trigger.trigger { + MappingTriggerType::Onchain(t) => { + if let MappingTrigger::Log { calls, .. } = t { + for call in calls.clone() { + all_calls.push((host_metrics.cheap_clone(), call)); + } + } + } + MappingTriggerType::Subgraph(t) => { + for call in t.calls.clone() { + // Convert subgraph call to the expected DeclaredCall type if needed + // or handle differently based on the types + all_calls.push((host_metrics.cheap_clone(), call)); + } + } + MappingTriggerType::Offchain(_) => {} + } + } + + all_calls + } + + /// Deduplicate calls. Unfortunately, we can't get `DeclaredCall` to + /// implement `Hash` or `Ord` easily, so we can only deduplicate by + /// comparing the whole call not with a `HashSet` or `BTreeSet`. 
+ /// Since that can be inefficient, we don't deduplicate if we have an + /// enormous amount of calls; in that case though, things will likely + /// blow up because of the amount of I/O that many calls cause. + /// Cutting off at 1000 is fairly arbitrary + fn deduplicate_calls( + &self, + calls: Vec<(Arc, DeclaredCall)>, + ) -> Vec<(Arc, DeclaredCall)> { + if calls.len() >= 1000 { + return calls; + } + + let mut uniq_calls = Vec::new(); + for (metrics, call) in calls { + if !uniq_calls.iter().any(|(_, c)| c == &call) { + uniq_calls.push((metrics, call)); + } + } + uniq_calls + } + + /// Log information about failed eth calls. 'Failure' here simply + /// means that the call was reverted; outright errors lead to a real + /// error. For reverted calls, `self.eth_calls` returns the label + /// from the manifest for that call. + /// + /// One reason why declared calls can fail is if they are attached + /// to the wrong handler, or if arguments are specified incorrectly. + /// Calls that revert every once in a while might be ok and what the + /// user intended, but we want to clearly log so that users can spot + /// mistakes in their manifest, which will lead to unnecessary eth + /// calls + fn log_declared_call_results( + logger: &Logger, + failures: &[String], + calls_count: usize, + trigger_count: usize, + elapsed: Duration, + ) { + let fail_count = failures.len(); + + if fail_count > 0 { + let mut counts: Vec<_> = failures.iter().counts().into_iter().collect(); + counts.sort_by_key(|(label, _)| *label); + + let failure_summary = counts + .into_iter() + .map(|(label, count)| { + let times = if count == 1 { "time" } else { "times" }; + format!("{label} ({count} {times})") + }) + .join(", "); + + error!(logger, "Declared calls failed"; + "triggers" => trigger_count, + "calls_count" => calls_count, + "fail_count" => fail_count, + "calls_ms" => elapsed.as_millis(), + "failures" => format!("[{}]", failure_summary) + ); + } else { + debug!(logger, "Declared calls"; + 
"triggers" => trigger_count, + "calls_count" => calls_count, + "calls_ms" => elapsed.as_millis() + ); + } + } } #[async_trait] @@ -1109,50 +1154,6 @@ impl blockchain::DecoderHook for DecoderHook { runnables: Vec>, metrics: &Arc, ) -> Result>, MappingError> { - /// Log information about failed eth calls. 'Failure' here simply - /// means that the call was reverted; outright errors lead to a real - /// error. For reverted calls, `self.eth_calls` returns the label - /// from the manifest for that call. - /// - /// One reason why declared calls can fail is if they are attached - /// to the wrong handler, or if arguments are specified incorrectly. - /// Calls that revert every once in a while might be ok and what the - /// user intended, but we want to clearly log so that users can spot - /// mistakes in their manifest, which will lead to unnecessary eth - /// calls - fn log_results( - logger: &Logger, - failures: &[String], - calls_count: usize, - trigger_count: usize, - elapsed: Duration, - ) { - let fail_count = failures.len(); - - if fail_count > 0 { - let mut counts: Vec<_> = failures.iter().counts().into_iter().collect(); - counts.sort_by_key(|(label, _)| *label); - let counts = counts - .into_iter() - .map(|(label, count)| { - let times = if count == 1 { "time" } else { "times" }; - format!("{label} ({count} {times})") - }) - .join(", "); - error!(logger, "Declared calls failed"; - "triggers" => trigger_count, - "calls_count" => calls_count, - "fail_count" => fail_count, - "calls_ms" => elapsed.as_millis(), - "failures" => format!("[{}]", counts)); - } else { - debug!(logger, "Declared calls"; - "triggers" => trigger_count, - "calls_count" => calls_count, - "calls_ms" => elapsed.as_millis()); - } - } - if ENV_VARS.mappings.disable_declared_calls { return Ok(runnables); } @@ -1160,51 +1161,17 @@ impl blockchain::DecoderHook for DecoderHook { let _section = metrics.stopwatch.start_section("declared_ethereum_call"); let start = Instant::now(); - let calls: Vec<_> = 
runnables - .iter() - .map(|r| &r.hosted_triggers) - .flatten() - .filter_map(|trigger| { - trigger - .mapping_trigger - .trigger - .as_onchain() - .map(|t| (trigger.host.host_metrics(), t)) - }) - .filter_map(|(metrics, trigger)| match trigger { - MappingTrigger::Log { calls, .. } => Some( - calls - .clone() - .into_iter() - .map(move |call| (metrics.cheap_clone(), call)), - ), - MappingTrigger::Block { .. } | MappingTrigger::Call { .. } => None, - }) - .flatten() - .collect(); + // Collect and process declared calls + let calls = self.collect_declared_calls(&runnables); + let deduplicated_calls = self.deduplicate_calls(calls); - // Deduplicate calls. Unfortunately, we can't get `DeclaredCall` to - // implement `Hash` or `Ord` easily, so we can only deduplicate by - // comparing the whole call not with a `HashSet` or `BTreeSet`. - // Since that can be inefficient, we don't deduplicate if we have an - // enormous amount of calls; in that case though, things will likely - // blow up because of the amount of I/O that many calls cause. 
- // Cutting off at 1000 is fairly arbitrary - let calls = if calls.len() < 1000 { - let mut uniq_calls = Vec::new(); - for (metrics, call) in calls { - if !uniq_calls.iter().any(|(_, c)| c == &call) { - uniq_calls.push((metrics, call)); - } - } - uniq_calls - } else { - calls - }; + // Execute calls and log results + let calls_count = deduplicated_calls.len(); + let results = self + .eth_calls(logger, block_ptr, deduplicated_calls) + .await?; - let calls_count = calls.len(); - let results = self.eth_calls(logger, block_ptr, calls).await?; - log_results( + Self::log_declared_call_results( logger, &results, calls_count, @@ -1372,8 +1339,10 @@ impl Mapping { .iter() .any(|handler| matches!(handler.filter, Some(BlockHandlerFilter::Call))) } +} - pub fn find_abi(&self, abi_name: &str) -> Result, Error> { +impl FindMappingABI for Mapping { + fn find_abi(&self, abi_name: &str) -> Result, Error> { Ok(self .abis .iter() @@ -1436,82 +1405,6 @@ impl UnresolvedMapping { } } -#[derive(Clone, Debug, Hash, Eq, PartialEq, Deserialize)] -pub struct UnresolvedMappingABI { - pub name: String, - pub file: Link, -} - -impl UnresolvedMappingABI { - pub async fn resolve( - self, - resolver: &Arc, - logger: &Logger, - ) -> Result { - let contract_bytes = resolver.cat(logger, &self.file).await.with_context(|| { - format!( - "failed to resolve ABI {} from {}", - self.name, self.file.link - ) - })?; - let contract = Contract::load(&*contract_bytes)?; - Ok(MappingABI { - name: self.name, - contract, - }) - } -} - -#[derive(Clone, Debug, PartialEq)] -pub struct MappingABI { - pub name: String, - pub contract: Contract, -} - -impl MappingABI { - pub fn function( - &self, - contract_name: &str, - name: &str, - signature: Option<&str>, - ) -> Result<&Function, Error> { - let contract = &self.contract; - let function = match signature { - // Behavior for apiVersion < 0.0.4: look up function by name; for overloaded - // functions this always picks the same overloaded variant, which is incorrect - 
// and may lead to encoding/decoding errors - None => contract.function(name).with_context(|| { - format!( - "Unknown function \"{}::{}\" called from WASM runtime", - contract_name, name - ) - })?, - - // Behavior for apiVersion >= 0.0.04: look up function by signature of - // the form `functionName(uint256,string) returns (bytes32,string)`; this - // correctly picks the correct variant of an overloaded function - Some(ref signature) => contract - .functions_by_name(name) - .with_context(|| { - format!( - "Unknown function \"{}::{}\" called from WASM runtime", - contract_name, name - ) - })? - .iter() - .find(|f| signature == &f.signature()) - .with_context(|| { - format!( - "Unknown function \"{}::{}\" with signature `{}` \ - called from WASM runtime", - contract_name, name, signature, - ) - })?, - }; - Ok(function) - } -} - #[derive(Clone, Debug, Hash, Eq, PartialEq, Deserialize)] pub struct MappingBlockHandler { pub handler: String, @@ -1644,225 +1537,3 @@ fn string_to_h256(s: &str) -> H256 { pub struct TemplateSource { pub abi: String, } - -/// Internal representation of declared calls. In the manifest that's -/// written as part of an event handler as -/// ```yaml -/// calls: -/// - myCall1: Contract[address].function(arg1, arg2, ...) -/// - .. -/// ``` -/// -/// The `address` and `arg` fields can be either `event.address` or -/// `event.params.`. 
Each entry under `calls` gets turned into a -/// `CallDcl` -#[derive(Clone, CheapClone, Debug, Default, Hash, Eq, PartialEq)] -pub struct CallDecls { - pub decls: Arc>, - readonly: (), -} - -/// A single call declaration, like `myCall1: -/// Contract[address].function(arg1, arg2, ...)` -#[derive(Clone, Debug, Hash, Eq, PartialEq)] -pub struct CallDecl { - /// A user-defined label - pub label: String, - /// The call expression - pub expr: CallExpr, - readonly: (), -} -impl CallDecl { - fn address(&self, log: &Log, params: &[LogParam]) -> Result { - let address = match &self.expr.address { - CallArg::Address => log.address, - CallArg::HexAddress(address) => *address, - CallArg::Param(name) => { - let value = params - .iter() - .find(|param| ¶m.name == name.as_str()) - .ok_or_else(|| anyhow!("unknown param {name}"))? - .value - .clone(); - value - .into_address() - .ok_or_else(|| anyhow!("param {name} is not an address"))? - } - }; - Ok(address) - } - - fn args(&self, log: &Log, params: &[LogParam]) -> Result, Error> { - self.expr - .args - .iter() - .map(|arg| match arg { - CallArg::Address => Ok(Token::Address(log.address)), - CallArg::HexAddress(address) => Ok(Token::Address(*address)), - CallArg::Param(name) => { - let value = params - .iter() - .find(|param| ¶m.name == name.as_str()) - .ok_or_else(|| anyhow!("unknown param {name}"))? 
- .value - .clone(); - Ok(value) - } - }) - .collect() - } -} - -impl<'de> de::Deserialize<'de> for CallDecls { - fn deserialize(deserializer: D) -> Result - where - D: de::Deserializer<'de>, - { - let decls: std::collections::HashMap = - de::Deserialize::deserialize(deserializer)?; - let decls = decls - .into_iter() - .map(|(name, expr)| { - expr.parse::().map(|expr| CallDecl { - label: name, - expr, - readonly: (), - }) - }) - .collect::>() - .map(|decls| Arc::new(decls)) - .map_err(de::Error::custom)?; - Ok(CallDecls { - decls, - readonly: (), - }) - } -} - -#[derive(Clone, Debug, Hash, Eq, PartialEq)] -pub struct CallExpr { - pub abi: Word, - pub address: CallArg, - pub func: Word, - pub args: Vec, - readonly: (), -} - -/// Parse expressions of the form `Contract[address].function(arg1, arg2, -/// ...)` where the `address` and the args are either `event.address` or -/// `event.params.`. -/// -/// The parser is pretty awful as it generates error messages that aren't -/// very helpful. We should replace all this with a real parser, most likely -/// `combine` which is what `graphql_parser` uses -impl FromStr for CallExpr { - type Err = anyhow::Error; - - fn from_str(s: &str) -> Result { - lazy_static! { - static ref RE: Regex = Regex::new( - r"(?x) - (?P[a-zA-Z0-9_]+)\[ - (?P
[^]]+)\] - \. - (?P[a-zA-Z0-9_]+)\( - (?P[^)]*) - \)" - ) - .unwrap(); - } - let x = RE - .captures(s) - .ok_or_else(|| anyhow!("invalid call expression `{s}`"))?; - let abi = Word::from(x.name("abi").unwrap().as_str()); - let address = x.name("address").unwrap().as_str().parse()?; - let func = Word::from(x.name("func").unwrap().as_str()); - let args: Vec = x - .name("args") - .unwrap() - .as_str() - .split(',') - .filter(|s| !s.is_empty()) - .map(|s| s.trim().parse::()) - .collect::>()?; - Ok(CallExpr { - abi, - address, - func, - args, - readonly: (), - }) - } -} - -#[derive(Clone, Debug, Hash, Eq, PartialEq)] -pub enum CallArg { - HexAddress(Address), - Address, - Param(Word), -} - -lazy_static! { - // Matches a 40-character hexadecimal string prefixed with '0x', typical for Ethereum addresses - static ref ADDR_RE: Regex = Regex::new(r"^0x[0-9a-fA-F]{40}$").unwrap(); -} - -impl FromStr for CallArg { - type Err = anyhow::Error; - - fn from_str(s: &str) -> Result { - if ADDR_RE.is_match(s) { - if let Ok(parsed_address) = Address::from_str(s) { - return Ok(CallArg::HexAddress(parsed_address)); - } - } - - let mut parts = s.split('.'); - match (parts.next(), parts.next(), parts.next()) { - (Some("event"), Some("address"), None) => Ok(CallArg::Address), - (Some("event"), Some("params"), Some(param)) => Ok(CallArg::Param(Word::from(param))), - _ => Err(anyhow!("invalid call argument `{}`", s)), - } - } -} - -#[test] -fn test_call_expr() { - let expr: CallExpr = "ERC20[event.address].balanceOf(event.params.token)" - .parse() - .unwrap(); - assert_eq!(expr.abi, "ERC20"); - assert_eq!(expr.address, CallArg::Address); - assert_eq!(expr.func, "balanceOf"); - assert_eq!(expr.args, vec![CallArg::Param("token".into())]); - - let expr: CallExpr = "Pool[event.params.pool].fees(event.params.token0, event.params.token1)" - .parse() - .unwrap(); - assert_eq!(expr.abi, "Pool"); - assert_eq!(expr.address, CallArg::Param("pool".into())); - assert_eq!(expr.func, "fees"); - assert_eq!( 
- expr.args, - vec![ - CallArg::Param("token0".into()), - CallArg::Param("token1".into()) - ] - ); - - let expr: CallExpr = "Pool[event.address].growth()".parse().unwrap(); - assert_eq!(expr.abi, "Pool"); - assert_eq!(expr.address, CallArg::Address); - assert_eq!(expr.func, "growth"); - assert_eq!(expr.args, vec![]); - - let expr: CallExpr = "Pool[0xDeaDbeefdEAdbeefdEadbEEFdeadbeEFdEaDbeeF].growth(0xDeaDbeefdEAdbeefdEadbEEFdeadbeEFdEaDbeeF)" - .parse() - .unwrap(); - let call_arg = - CallArg::HexAddress(H160::from_str("0xDeaDbeefdEAdbeefdEadbEEFdeadbeEFdEaDbeeF").unwrap()); - assert_eq!(expr.abi, "Pool"); - assert_eq!(expr.address, call_arg); - assert_eq!(expr.func, "growth"); - assert_eq!(expr.args, vec![call_arg]); -} diff --git a/chain/ethereum/src/env.rs b/chain/ethereum/src/env.rs index 75c313212b9..bc7223dbc07 100644 --- a/chain/ethereum/src/env.rs +++ b/chain/ethereum/src/env.rs @@ -33,6 +33,9 @@ pub struct EnvVars { /// Set by the environment variable `ETHEREUM_BLOCK_BATCH_SIZE`. The /// default value is 10 blocks. pub block_batch_size: usize, + /// Set by the environment variable `ETHEREUM_BLOCK_PTR_BATCH_SIZE`. The + /// default value is 10 blocks. + pub block_ptr_batch_size: usize, /// Maximum number of blocks to request in each chunk. /// /// Set by the environment variable `GRAPH_ETHEREUM_MAX_BLOCK_RANGE_SIZE`. 
@@ -116,6 +119,7 @@ impl From for EnvVars { trace_stream_step_size: x.trace_stream_step_size, max_event_only_range: x.max_event_only_range, block_batch_size: x.block_batch_size, + block_ptr_batch_size: x.block_ptr_batch_size, max_block_range_size: x.max_block_range_size, json_rpc_timeout: Duration::from_secs(x.json_rpc_timeout_in_secs), block_receipts_check_timeout: Duration::from_secs( @@ -160,6 +164,8 @@ struct Inner { max_event_only_range: BlockNumber, #[envconfig(from = "ETHEREUM_BLOCK_BATCH_SIZE", default = "10")] block_batch_size: usize, + #[envconfig(from = "ETHEREUM_BLOCK_PTR_BATCH_SIZE", default = "100")] + block_ptr_batch_size: usize, #[envconfig(from = "GRAPH_ETHEREUM_MAX_BLOCK_RANGE_SIZE", default = "2000")] max_block_range_size: BlockNumber, #[envconfig(from = "GRAPH_ETHEREUM_JSON_RPC_TIMEOUT", default = "180")] diff --git a/chain/ethereum/src/ethereum_adapter.rs b/chain/ethereum/src/ethereum_adapter.rs index c4ea6323c7d..7173c069c65 100644 --- a/chain/ethereum/src/ethereum_adapter.rs +++ b/chain/ethereum/src/ethereum_adapter.rs @@ -2,11 +2,14 @@ use futures03::{future::BoxFuture, stream::FuturesUnordered}; use graph::blockchain::client::ChainClient; use graph::blockchain::BlockHash; use graph::blockchain::ChainIdentifier; +use graph::blockchain::ExtendedBlockPtr; + use graph::components::transaction_receipt::LightTransactionReceipt; use graph::data::store::ethereum::call; use graph::data::store::scalar; use graph::data::subgraph::UnifiedMappingApiVersion; use graph::data::subgraph::API_VERSION_0_0_7; +use graph::data_source::common::ContractCall; use graph::futures01::stream; use graph::futures01::Future; use graph::futures01::Stream; @@ -58,15 +61,16 @@ use crate::chain::BlockFinality; use crate::trigger::LogRef; use crate::Chain; use crate::NodeCapabilities; +use crate::TriggerFilter; use crate::{ adapter::{ - ContractCall, ContractCallError, EthGetLogsFilter, EthereumAdapter as EthereumAdapterTrait, + ContractCallError, EthGetLogsFilter, 
EthereumAdapter as EthereumAdapterTrait, EthereumBlockFilter, EthereumCallFilter, EthereumLogFilter, ProviderEthRpcMetrics, SubgraphEthRpcMetrics, }, transport::Transport, trigger::{EthereumBlockTriggerType, EthereumTrigger}, - TriggerFilter, ENV_VARS, + ENV_VARS, }; #[derive(Debug, Clone)] @@ -403,6 +407,7 @@ impl EthereumAdapter { "503 Service Unavailable", // Alchemy "ServerError(-32000)", // Alchemy "Try with this block range", // zKSync era + "block range too large", // Monad ]; if from > to { @@ -780,6 +785,64 @@ impl EthereumAdapter { .buffered(ENV_VARS.block_batch_size) } + /// Request blocks by number through JSON-RPC. + pub fn load_block_ptrs_by_numbers_rpc( + &self, + logger: Logger, + numbers: Vec, + ) -> impl futures03::Stream, Error>> + Send { + let web3 = self.web3.clone(); + + futures03::stream::iter(numbers.into_iter().map(move |number| { + let web3 = web3.clone(); + let logger = logger.clone(); + + async move { + retry(format!("load block {}", number), &logger) + .limit(ENV_VARS.request_retries) + .timeout_secs(ENV_VARS.json_rpc_timeout.as_secs()) + .run(move || { + let web3 = web3.clone(); + + async move { + let block_result = web3 + .eth() + .block(BlockId::Number(Web3BlockNumber::Number(number.into()))) + .await; + + match block_result { + Ok(Some(block)) => { + let ptr = ExtendedBlockPtr::try_from(( + block.hash, + block.number, + block.parent_hash, + block.timestamp, + )) + .map_err(|e| { + anyhow::anyhow!("Failed to convert block: {}", e) + })?; + Ok(Arc::new(ptr)) + } + Ok(None) => Err(anyhow::anyhow!( + "Ethereum node did not find block with number {:?}", + number + )), + Err(e) => Err(anyhow::anyhow!("Failed to fetch block: {}", e)), + } + } + }) + .await + .map_err(|e| match e { + TimeoutError::Elapsed => { + anyhow::anyhow!("Timeout while fetching block {}", number) + } + TimeoutError::Inner(e) => e, + }) + } + })) + .buffered(ENV_VARS.block_ptr_batch_size) + } + /// Request blocks ptrs for numbers through JSON-RPC. 
/// /// Reorg safety: If ids are numbers, they must be a final blocks. @@ -1923,6 +1986,9 @@ pub(crate) async fn get_calls( calls: Some(calls), })) } + BlockFinality::Ptr(_) => { + unreachable!("get_calls called with BlockFinality::Ptr") + } } } @@ -2077,8 +2143,8 @@ async fn filter_call_triggers_from_unsuccessful_transactions( let transaction_hashes: BTreeSet = block .trigger_data .iter() - .filter_map(|trigger| match trigger { - EthereumTrigger::Call(call_trigger) => Some(call_trigger.transaction_hash), + .filter_map(|trigger| match trigger.as_chain() { + Some(EthereumTrigger::Call(call_trigger)) => Some(call_trigger.transaction_hash), _ => None, }) .collect::>>() @@ -2104,6 +2170,11 @@ async fn filter_call_triggers_from_unsuccessful_transactions( "this function should not be called when dealing with non-final blocks" ) } + BlockFinality::Ptr(_block) => { + unreachable!( + "this function should not be called when dealing with header-only blocks" + ) + } } }; @@ -2169,7 +2240,7 @@ async fn filter_call_triggers_from_unsuccessful_transactions( // Filter call triggers from unsuccessful transactions block.trigger_data.retain(|trigger| { - if let EthereumTrigger::Call(call_trigger) = trigger { + if let Some(EthereumTrigger::Call(call_trigger)) = trigger.as_chain() { // Unwrap: We already checked that those values exist transaction_success[&call_trigger.transaction_hash.unwrap()] } else { diff --git a/chain/ethereum/src/lib.rs b/chain/ethereum/src/lib.rs index b83415146ac..8cf4e4cc669 100644 --- a/chain/ethereum/src/lib.rs +++ b/chain/ethereum/src/lib.rs @@ -19,7 +19,7 @@ pub use buffered_call_cache::BufferedCallCache; // ETHDEP: These concrete types should probably not be exposed. 
pub use data_source::{ - BlockHandlerFilter, DataSource, DataSourceTemplate, Mapping, MappingABI, TemplateSource, + BlockHandlerFilter, DataSource, DataSourceTemplate, Mapping, TemplateSource, }; pub mod chain; @@ -28,8 +28,8 @@ pub mod network; pub mod trigger; pub use crate::adapter::{ - ContractCall, ContractCallError, EthereumAdapter as EthereumAdapterTrait, - ProviderEthRpcMetrics, SubgraphEthRpcMetrics, TriggerFilter, + ContractCallError, EthereumAdapter as EthereumAdapterTrait, ProviderEthRpcMetrics, + SubgraphEthRpcMetrics, TriggerFilter, }; pub use crate::chain::Chain; pub use graph::blockchain::BlockIngestor; diff --git a/chain/ethereum/src/runtime/runtime_adapter.rs b/chain/ethereum/src/runtime/runtime_adapter.rs index 4147d61f5b0..01f148bdd4c 100644 --- a/chain/ethereum/src/runtime/runtime_adapter.rs +++ b/chain/ethereum/src/runtime/runtime_adapter.rs @@ -1,10 +1,9 @@ use std::{sync::Arc, time::Instant}; use crate::adapter::EthereumRpcError; -use crate::data_source::MappingABI; use crate::{ - capabilities::NodeCapabilities, network::EthereumNetworkAdapters, Chain, ContractCall, - ContractCallError, DataSource, EthereumAdapter, EthereumAdapterTrait, ENV_VARS, + capabilities::NodeCapabilities, network::EthereumNetworkAdapters, Chain, ContractCallError, + EthereumAdapter, EthereumAdapterTrait, ENV_VARS, }; use anyhow::{anyhow, Context, Error}; use blockchain::HostFn; @@ -13,6 +12,8 @@ use graph::components::subgraph::HostMetrics; use graph::data::store::ethereum::call; use graph::data::store::scalar::BigInt; use graph::data::subgraph::API_VERSION_0_0_9; +use graph::data_source; +use graph::data_source::common::{ContractCall, MappingABI}; use graph::futures03::compat::Future01CompatExt; use graph::prelude::web3::types::H160; use graph::runtime::gas::Gas; @@ -80,58 +81,93 @@ pub fn eth_call_gas(chain_identifier: &ChainIdentifier) -> Option { } impl blockchain::RuntimeAdapter for RuntimeAdapter { - fn host_fns(&self, ds: &DataSource) -> Result, Error> { - let 
abis = ds.mapping.abis.clone(); - let call_cache = self.call_cache.cheap_clone(); - let eth_adapters = self.eth_adapters.cheap_clone(); - let archive = ds.mapping.requires_archive()?; - let eth_call_gas = eth_call_gas(&self.chain_identifier); - - let ethereum_call = HostFn { - name: "ethereum.call", - func: Arc::new(move |ctx, wasm_ptr| { - // Ethereum calls should prioritise call-only adapters if one is available. - let eth_adapter = eth_adapters.call_or_cheapest(Some(&NodeCapabilities { - archive, - traces: false, - }))?; - ethereum_call( - ð_adapter, - call_cache.cheap_clone(), - ctx, - wasm_ptr, - &abis, - eth_call_gas, - ) - .map(|ptr| ptr.wasm_ptr()) - }), - }; - - let eth_adapters = self.eth_adapters.cheap_clone(); - let ethereum_get_balance = HostFn { - name: "ethereum.getBalance", - func: Arc::new(move |ctx, wasm_ptr| { - let eth_adapter = eth_adapters.unverified_cheapest_with(&NodeCapabilities { - archive, - traces: false, - })?; - eth_get_balance(ð_adapter, ctx, wasm_ptr).map(|ptr| ptr.wasm_ptr()) - }), - }; + fn host_fns(&self, ds: &data_source::DataSource) -> Result, Error> { + fn create_host_fns( + abis: Arc>>, // Use Arc to ensure `'static` lifetimes. 
+ archive: bool, + call_cache: Arc, + eth_adapters: Arc, + eth_call_gas: Option, + ) -> Vec { + vec![ + HostFn { + name: "ethereum.call", + func: Arc::new({ + let eth_adapters = eth_adapters.clone(); + let call_cache = call_cache.clone(); + let abis = abis.clone(); + move |ctx, wasm_ptr| { + let eth_adapter = + eth_adapters.call_or_cheapest(Some(&NodeCapabilities { + archive, + traces: false, + }))?; + ethereum_call( + ð_adapter, + call_cache.clone(), + ctx, + wasm_ptr, + &abis, + eth_call_gas, + ) + .map(|ptr| ptr.wasm_ptr()) + } + }), + }, + HostFn { + name: "ethereum.getBalance", + func: Arc::new({ + let eth_adapters = eth_adapters.clone(); + move |ctx, wasm_ptr| { + let eth_adapter = + eth_adapters.unverified_cheapest_with(&NodeCapabilities { + archive, + traces: false, + })?; + eth_get_balance(ð_adapter, ctx, wasm_ptr).map(|ptr| ptr.wasm_ptr()) + } + }), + }, + HostFn { + name: "ethereum.hasCode", + func: Arc::new({ + let eth_adapters = eth_adapters.clone(); + move |ctx, wasm_ptr| { + let eth_adapter = + eth_adapters.unverified_cheapest_with(&NodeCapabilities { + archive, + traces: false, + })?; + eth_has_code(ð_adapter, ctx, wasm_ptr).map(|ptr| ptr.wasm_ptr()) + } + }), + }, + ] + } - let eth_adapters = self.eth_adapters.cheap_clone(); - let ethereum_get_code = HostFn { - name: "ethereum.hasCode", - func: Arc::new(move |ctx, wasm_ptr| { - let eth_adapter = eth_adapters.unverified_cheapest_with(&NodeCapabilities { - archive, - traces: false, - })?; - eth_has_code(ð_adapter, ctx, wasm_ptr).map(|ptr| ptr.wasm_ptr()) - }), + let host_fns = match ds { + data_source::DataSource::Onchain(onchain_ds) => { + let abis = Arc::new(onchain_ds.mapping.abis.clone()); + let archive = onchain_ds.mapping.requires_archive()?; + let call_cache = self.call_cache.cheap_clone(); + let eth_adapters = self.eth_adapters.cheap_clone(); + let eth_call_gas = eth_call_gas(&self.chain_identifier); + + create_host_fns(abis, archive, call_cache, eth_adapters, eth_call_gas) + } + 
data_source::DataSource::Subgraph(subgraph_ds) => { + let abis = Arc::new(subgraph_ds.mapping.abis.clone()); + let archive = subgraph_ds.mapping.requires_archive()?; + let call_cache = self.call_cache.cheap_clone(); + let eth_adapters = self.eth_adapters.cheap_clone(); + let eth_call_gas = eth_call_gas(&self.chain_identifier); + + create_host_fns(abis, archive, call_cache, eth_adapters, eth_call_gas) + } + data_source::DataSource::Offchain(_) => vec![], }; - Ok(vec![ethereum_call, ethereum_get_balance, ethereum_get_code]) + Ok(host_fns) } } diff --git a/chain/ethereum/src/tests.rs b/chain/ethereum/src/tests.rs index 455a7c07432..00873f8ea87 100644 --- a/chain/ethereum/src/tests.rs +++ b/chain/ethereum/src/tests.rs @@ -1,7 +1,7 @@ use std::sync::Arc; use graph::{ - blockchain::{block_stream::BlockWithTriggers, BlockPtr}, + blockchain::{block_stream::BlockWithTriggers, BlockPtr, Trigger}, prelude::{ web3::types::{Address, Bytes, Log, H160, H256, U64}, EthereumCall, LightEthereumBlock, @@ -107,10 +107,12 @@ fn test_trigger_ordering() { &logger, ); - assert_eq!( - block_with_triggers.trigger_data, - vec![log1, log2, call1, log3, call2, call4, call3, block2, block1] - ); + let expected = vec![log1, log2, call1, log3, call2, call4, call3, block2, block1] + .into_iter() + .map(|t| Trigger::Chain(t)) + .collect::>(); + + assert_eq!(block_with_triggers.trigger_data, expected); } #[test] @@ -203,8 +205,10 @@ fn test_trigger_dedup() { &logger, ); - assert_eq!( - block_with_triggers.trigger_data, - vec![log1, log2, call1, log3, call2, call3, block2, block1] - ); + let expected = vec![log1, log2, call1, log3, call2, call3, block2, block1] + .into_iter() + .map(|t| Trigger::Chain(t)) + .collect::>(); + + assert_eq!(block_with_triggers.trigger_data, expected); } diff --git a/chain/ethereum/src/trigger.rs b/chain/ethereum/src/trigger.rs index 128ed8d3e98..a5d83690b4b 100644 --- a/chain/ethereum/src/trigger.rs +++ b/chain/ethereum/src/trigger.rs @@ -3,6 +3,7 @@ use 
graph::blockchain::TriggerData; use graph::data::subgraph::API_VERSION_0_0_2; use graph::data::subgraph::API_VERSION_0_0_6; use graph::data::subgraph::API_VERSION_0_0_7; +use graph::data_source::common::DeclaredCall; use graph::prelude::ethabi::ethereum_types::H160; use graph::prelude::ethabi::ethereum_types::H256; use graph::prelude::ethabi::ethereum_types::U128; @@ -28,7 +29,6 @@ use graph_runtime_wasm::module::ToAscPtr; use std::ops::Deref; use std::{cmp::Ordering, sync::Arc}; -use crate::data_source::DeclaredCall; use crate::runtime::abi::AscEthereumBlock; use crate::runtime::abi::AscEthereumBlock_0_0_6; use crate::runtime::abi::AscEthereumCall; diff --git a/chain/near/src/chain.rs b/chain/near/src/chain.rs index 02c8e57d6a0..aa580b03f90 100644 --- a/chain/near/src/chain.rs +++ b/chain/near/src/chain.rs @@ -4,11 +4,11 @@ use graph::blockchain::firehose_block_ingestor::FirehoseBlockIngestor; use graph::blockchain::substreams_block_stream::SubstreamsBlockStream; use graph::blockchain::{ BasicBlockchainBuilder, BlockIngestor, BlockchainBuilder, BlockchainKind, NoopDecoderHook, - NoopRuntimeAdapter, + NoopRuntimeAdapter, Trigger, TriggerFilterWrapper, }; use graph::cheap_clone::CheapClone; use graph::components::network_provider::ChainName; -use graph::components::store::DeploymentCursorTracker; +use graph::components::store::{DeploymentCursorTracker, SourceableStore}; use graph::data::subgraph::UnifiedMappingApiVersion; use graph::env::EnvVars; use graph::firehose::FirehoseEndpoint; @@ -32,10 +32,12 @@ use graph::{ prelude::{async_trait, o, BlockNumber, ChainStore, Error, Logger, LoggerFactory}, }; use prost::Message; +use std::collections::BTreeSet; use std::sync::Arc; use crate::adapter::TriggerFilter; use crate::codec::substreams_triggers::BlockAndReceipts; +use crate::codec::Block; use crate::data_source::{DataSourceTemplate, UnresolvedDataSourceTemplate}; use crate::trigger::{self, NearTrigger}; use crate::{ @@ -108,7 +110,6 @@ impl BlockStreamBuilder for 
NearStreamBuilder { chain.metrics_registry.clone(), ))) } - async fn build_firehose( &self, chain: &Chain, @@ -151,8 +152,9 @@ impl BlockStreamBuilder for NearStreamBuilder { _chain: &Chain, _deployment: DeploymentLocator, _start_blocks: Vec, + _source_subgraph_stores: Vec>, _subgraph_current_block: Option, - _filter: Arc<::TriggerFilter>, + _filter: Arc>, _unified_api_version: UnifiedMappingApiVersion, ) -> Result>> { todo!() @@ -230,7 +232,8 @@ impl Blockchain for Chain { deployment: DeploymentLocator, store: impl DeploymentCursorTracker, start_blocks: Vec, - filter: Arc, + _source_subgraph_stores: Vec>, + filter: Arc>, unified_api_version: UnifiedMappingApiVersion, ) -> Result>, Error> { if self.prefer_substreams { @@ -242,7 +245,7 @@ impl Blockchain for Chain { deployment, store.firehose_cursor(), store.block_ptr(), - filter, + filter.chain_filter.clone(), ) .await; } @@ -254,7 +257,7 @@ impl Blockchain for Chain { store.firehose_cursor(), start_blocks, store.block_ptr(), - filter, + filter.chain_filter.clone(), unified_api_version, ) .await @@ -322,6 +325,18 @@ impl TriggersAdapterTrait for TriggersAdapter { panic!("Should never be called since not used by FirehoseBlockStream") } + async fn load_block_ptrs_by_numbers( + &self, + _logger: Logger, + _block_numbers: BTreeSet, + ) -> Result> { + unimplemented!() + } + + async fn chain_head_ptr(&self) -> Result, Error> { + unimplemented!() + } + async fn triggers_in_block( &self, logger: &Logger, @@ -462,11 +477,13 @@ impl BlockStreamMapper for FirehoseMapper { .into_iter() .zip(receipt.into_iter()) .map(|(outcome, receipt)| { - NearTrigger::Receipt(Arc::new(trigger::ReceiptWithOutcome { - outcome, - receipt, - block: arc_block.clone(), - })) + Trigger::Chain(NearTrigger::Receipt(Arc::new( + trigger::ReceiptWithOutcome { + outcome, + receipt, + block: arc_block.clone(), + }, + ))) }) .collect(); @@ -973,8 +990,8 @@ mod test { .trigger_data .clone() .into_iter() - .filter_map(|x| match x { - 
crate::trigger::NearTrigger::Block(b) => b.header.clone().map(|x| x.height), + .filter_map(|x| match x.as_chain() { + Some(crate::trigger::NearTrigger::Block(b)) => b.header.clone().map(|x| x.height), _ => None, }) .collect() diff --git a/chain/substreams/src/block_stream.rs b/chain/substreams/src/block_stream.rs index 8844df0610e..8008694f66b 100644 --- a/chain/substreams/src/block_stream.rs +++ b/chain/substreams/src/block_stream.rs @@ -7,9 +7,9 @@ use graph::{ BlockStream, BlockStreamBuilder as BlockStreamBuilderTrait, FirehoseCursor, }, substreams_block_stream::SubstreamsBlockStream, - Blockchain, + Blockchain, TriggerFilterWrapper, }, - components::store::DeploymentLocator, + components::store::{DeploymentLocator, SourceableStore}, data::subgraph::UnifiedMappingApiVersion, prelude::{async_trait, BlockNumber, BlockPtr}, schema::InputSchema, @@ -104,8 +104,9 @@ impl BlockStreamBuilderTrait for BlockStreamBuilder { _chain: &Chain, _deployment: DeploymentLocator, _start_blocks: Vec, + _source_subgraph_stores: Vec>, _subgraph_current_block: Option, - _filter: Arc, + _filter: Arc>, _unified_api_version: UnifiedMappingApiVersion, ) -> Result>> { unimplemented!("polling block stream is not support for substreams") diff --git a/chain/substreams/src/chain.rs b/chain/substreams/src/chain.rs index d2efe6dec91..a15dbb0f269 100644 --- a/chain/substreams/src/chain.rs +++ b/chain/substreams/src/chain.rs @@ -4,10 +4,10 @@ use anyhow::Error; use graph::blockchain::client::ChainClient; use graph::blockchain::{ BasicBlockchainBuilder, BlockIngestor, BlockTime, EmptyNodeCapabilities, NoopDecoderHook, - NoopRuntimeAdapter, + NoopRuntimeAdapter, TriggerFilterWrapper, }; use graph::components::network_provider::ChainName; -use graph::components::store::DeploymentCursorTracker; +use graph::components::store::{DeploymentCursorTracker, SourceableStore}; use graph::env::EnvVars; use graph::prelude::{BlockHash, CheapClone, Entity, LoggerFactory, MetricsRegistry}; use 
graph::schema::EntityKey; @@ -140,7 +140,8 @@ impl Blockchain for Chain { deployment: DeploymentLocator, store: impl DeploymentCursorTracker, _start_blocks: Vec, - filter: Arc, + _source_subgraph_stores: Vec>, + filter: Arc>, _unified_api_version: UnifiedMappingApiVersion, ) -> Result>, Error> { self.block_stream_builder @@ -150,7 +151,7 @@ impl Blockchain for Chain { deployment, store.firehose_cursor(), store.block_ptr(), - filter, + filter.chain_filter.clone(), ) .await } diff --git a/chain/substreams/src/trigger.rs b/chain/substreams/src/trigger.rs index ed7016216d5..13a5bed1a1d 100644 --- a/chain/substreams/src/trigger.rs +++ b/chain/substreams/src/trigger.rs @@ -16,7 +16,7 @@ use graph::{ }; use graph_runtime_wasm::module::ToAscPtr; use lazy_static::__Deref; -use std::sync::Arc; +use std::{collections::BTreeSet, sync::Arc}; use crate::{Block, Chain, NoopDataSourceTemplate, ParsedChanges}; @@ -136,6 +136,18 @@ impl blockchain::TriggersAdapter for TriggersAdapter { unimplemented!() } + async fn load_block_ptrs_by_numbers( + &self, + _logger: Logger, + _block_numbers: BTreeSet, + ) -> Result, Error> { + unimplemented!() + } + + async fn chain_head_ptr(&self) -> Result, Error> { + unimplemented!() + } + async fn scan_triggers( &self, _from: BlockNumber, @@ -228,6 +240,7 @@ where state.entity_cache.set( key, entity, + block.number, Some(&mut state.write_capacity_remaining), )?; } diff --git a/core/graphman/src/commands/deployment/mod.rs b/core/graphman/src/commands/deployment/mod.rs index 9c695a5f74a..4cac2277bbe 100644 --- a/core/graphman/src/commands/deployment/mod.rs +++ b/core/graphman/src/commands/deployment/mod.rs @@ -1,3 +1,5 @@ pub mod info; pub mod pause; +pub mod reassign; pub mod resume; +pub mod unassign; diff --git a/core/graphman/src/commands/deployment/reassign.rs b/core/graphman/src/commands/deployment/reassign.rs new file mode 100644 index 00000000000..2e5916a7aae --- /dev/null +++ b/core/graphman/src/commands/deployment/reassign.rs @@ -0,0 +1,117 
@@ +use std::sync::Arc; + +use anyhow::anyhow; +use graph::components::store::DeploymentLocator; +use graph::components::store::StoreEvent; +use graph::prelude::EntityChange; +use graph::prelude::NodeId; +use graph_store_postgres::command_support::catalog; +use graph_store_postgres::command_support::catalog::Site; +use graph_store_postgres::connection_pool::ConnectionPool; +use graph_store_postgres::NotificationSender; +use thiserror::Error; + +use crate::deployment::DeploymentSelector; +use crate::deployment::DeploymentVersionSelector; +use crate::GraphmanError; + +pub struct Deployment { + locator: DeploymentLocator, + site: Site, +} + +impl Deployment { + pub fn locator(&self) -> &DeploymentLocator { + &self.locator + } +} + +#[derive(Debug, Error)] +pub enum ReassignDeploymentError { + #[error("deployment '{0}' is already assigned to '{1}'")] + AlreadyAssigned(String, String), + + #[error(transparent)] + Common(#[from] GraphmanError), +} + +#[derive(Clone, Debug)] +pub enum ReassignResult { + EmptyResponse, + CompletedWithWarnings(Vec), +} + +pub fn load_deployment( + primary_pool: ConnectionPool, + deployment: &DeploymentSelector, +) -> Result { + let mut primary_conn = primary_pool.get().map_err(GraphmanError::from)?; + + let locator = crate::deployment::load_deployment_locator( + &mut primary_conn, + deployment, + &DeploymentVersionSelector::All, + )?; + + let mut catalog_conn = catalog::Connection::new(primary_conn); + + let site = catalog_conn + .locate_site(locator.clone()) + .map_err(GraphmanError::from)? 
+ .ok_or_else(|| { + GraphmanError::Store(anyhow!("deployment site not found for '{locator}'")) + })?; + + Ok(Deployment { locator, site }) +} + +pub fn reassign_deployment( + primary_pool: ConnectionPool, + notification_sender: Arc, + deployment: &Deployment, + node: &NodeId, +) -> Result { + let primary_conn = primary_pool.get().map_err(GraphmanError::from)?; + let mut catalog_conn = catalog::Connection::new(primary_conn); + + let changes: Vec = match catalog_conn + .assigned_node(&deployment.site) + .map_err(GraphmanError::from)? + { + Some(curr) => { + if &curr == node { + vec![] + } else { + catalog_conn + .reassign_subgraph(&deployment.site, &node) + .map_err(GraphmanError::from)? + } + } + None => catalog_conn + .assign_subgraph(&deployment.site, &node) + .map_err(GraphmanError::from)?, + }; + + if changes.is_empty() { + return Err(ReassignDeploymentError::AlreadyAssigned( + deployment.locator.to_string(), + node.to_string(), + )); + } + + catalog_conn + .send_store_event(¬ification_sender, &StoreEvent::new(changes)) + .map_err(GraphmanError::from)?; + + let mirror = catalog::Mirror::primary_only(primary_pool); + let count = mirror + .assignments(&node) + .map_err(GraphmanError::from)? + .len(); + if count == 1 { + let warning_msg = format!("This is the only deployment assigned to '{}'. 
Please make sure that the node ID is spelled correctly.",node.as_str()); + Ok(ReassignResult::CompletedWithWarnings(vec![warning_msg])) + } else { + Ok(ReassignResult::EmptyResponse) + } +} diff --git a/core/graphman/src/commands/deployment/unassign.rs b/core/graphman/src/commands/deployment/unassign.rs new file mode 100644 index 00000000000..5233e61ada1 --- /dev/null +++ b/core/graphman/src/commands/deployment/unassign.rs @@ -0,0 +1,80 @@ +use std::sync::Arc; + +use anyhow::anyhow; +use graph::components::store::DeploymentLocator; +use graph::components::store::StoreEvent; +use graph_store_postgres::command_support::catalog; +use graph_store_postgres::command_support::catalog::Site; +use graph_store_postgres::connection_pool::ConnectionPool; +use graph_store_postgres::NotificationSender; +use thiserror::Error; + +use crate::deployment::DeploymentSelector; +use crate::deployment::DeploymentVersionSelector; +use crate::GraphmanError; + +pub struct AssignedDeployment { + locator: DeploymentLocator, + site: Site, +} + +impl AssignedDeployment { + pub fn locator(&self) -> &DeploymentLocator { + &self.locator + } +} + +#[derive(Debug, Error)] +pub enum UnassignDeploymentError { + #[error("deployment '{0}' is already unassigned")] + AlreadyUnassigned(String), + + #[error(transparent)] + Common(#[from] GraphmanError), +} + +pub fn load_assigned_deployment( + primary_pool: ConnectionPool, + deployment: &DeploymentSelector, +) -> Result { + let mut primary_conn = primary_pool.get().map_err(GraphmanError::from)?; + + let locator = crate::deployment::load_deployment_locator( + &mut primary_conn, + deployment, + &DeploymentVersionSelector::All, + )?; + + let mut catalog_conn = catalog::Connection::new(primary_conn); + + let site = catalog_conn + .locate_site(locator.clone()) + .map_err(GraphmanError::from)? 
+ .ok_or_else(|| { + GraphmanError::Store(anyhow!("deployment site not found for '{locator}'")) + })?; + + match catalog_conn + .assigned_node(&site) + .map_err(GraphmanError::from)? + { + Some(_) => Ok(AssignedDeployment { locator, site }), + None => Err(UnassignDeploymentError::AlreadyUnassigned( + locator.to_string(), + )), + } +} + +pub fn unassign_deployment( + primary_pool: ConnectionPool, + notification_sender: Arc, + deployment: AssignedDeployment, +) -> Result<(), GraphmanError> { + let primary_conn = primary_pool.get()?; + let mut catalog_conn = catalog::Connection::new(primary_conn); + + let changes = catalog_conn.unassign_subgraph(&deployment.site)?; + catalog_conn.send_store_event(¬ification_sender, &StoreEvent::new(changes))?; + + Ok(()) +} diff --git a/core/src/subgraph/context/instance/hosts.rs b/core/src/subgraph/context/instance/hosts.rs index 2ec1e4578f0..9c18e12ce1e 100644 --- a/core/src/subgraph/context/instance/hosts.rs +++ b/core/src/subgraph/context/instance/hosts.rs @@ -57,7 +57,7 @@ impl> OnchainHosts { } pub fn push(&mut self, host: Arc) { - assert!(host.data_source().as_onchain().is_some()); + assert!(host.data_source().is_chain_based()); self.hosts.push(host.cheap_clone()); let idx = self.hosts.len() - 1; diff --git a/core/src/subgraph/context/instance/mod.rs b/core/src/subgraph/context/instance/mod.rs index ed242836a28..86b64195493 100644 --- a/core/src/subgraph/context/instance/mod.rs +++ b/core/src/subgraph/context/instance/mod.rs @@ -22,13 +22,17 @@ pub(crate) struct SubgraphInstance> { pub(super) static_data_sources: Arc>>, host_metrics: Arc, - /// The hosts represent the data sources in the subgraph. There is one host per data source. + /// The hosts represent the onchain data sources in the subgraph. There is one host per data source. /// Data sources with no mappings (e.g. direct substreams) have no host. /// /// Onchain hosts must be created in increasing order of block number. 
`fn hosts_for_trigger` /// will return the onchain hosts in the same order as they were inserted. onchain_hosts: OnchainHosts, + /// `subgraph_hosts` represent subgraph data sources declared in the manifest. These are a special + /// kind of data source that depends on the data from another source subgraph. + subgraph_hosts: OnchainHosts, + offchain_hosts: OffchainHosts, /// Maps the hash of a module to a channel to the thread in which the module is instantiated. @@ -79,6 +83,7 @@ where network, static_data_sources: Arc::new(manifest.data_sources), onchain_hosts: OnchainHosts::new(), + subgraph_hosts: OnchainHosts::new(), offchain_hosts: OffchainHosts::new(), module_cache: HashMap::new(), templates, @@ -138,34 +143,44 @@ where ); } - let is_onchain = data_source.is_onchain(); let Some(host) = self.new_host(logger.clone(), data_source)? else { return Ok(None); }; // Check for duplicates and add the host. - if is_onchain { - // `onchain_hosts` will remain ordered by the creation block. - // See also 8f1bca33-d3b7-4035-affc-fd6161a12448. - ensure!( - self.onchain_hosts - .last() - .and_then(|h| h.creation_block_number()) - <= host.data_source().creation_block(), - ); + match host.data_source() { + DataSource::Onchain(_) => { + // `onchain_hosts` will remain ordered by the creation block. + // See also 8f1bca33-d3b7-4035-affc-fd6161a12448. 
+ ensure!( + self.onchain_hosts + .last() + .and_then(|h| h.creation_block_number()) + <= host.data_source().creation_block(), + ); - if self.onchain_hosts.contains(&host) { - Ok(None) - } else { - self.onchain_hosts.push(host.cheap_clone()); - Ok(Some(host)) + if self.onchain_hosts.contains(&host) { + Ok(None) + } else { + self.onchain_hosts.push(host.cheap_clone()); + Ok(Some(host)) + } } - } else { - if self.offchain_hosts.contains(&host) { - Ok(None) - } else { - self.offchain_hosts.push(host.cheap_clone()); - Ok(Some(host)) + DataSource::Offchain(_) => { + if self.offchain_hosts.contains(&host) { + Ok(None) + } else { + self.offchain_hosts.push(host.cheap_clone()); + Ok(Some(host)) + } + } + DataSource::Subgraph(_) => { + if self.subgraph_hosts.contains(&host) { + Ok(None) + } else { + self.subgraph_hosts.push(host.cheap_clone()); + Ok(Some(host)) + } } } } @@ -226,6 +241,9 @@ where TriggerData::Offchain(trigger) => self .offchain_hosts .matches_by_address(trigger.source.address().as_ref().map(|a| a.as_slice())), + TriggerData::Subgraph(trigger) => self + .subgraph_hosts + .matches_by_address(Some(trigger.source.to_bytes().as_slice())), } } diff --git a/core/src/subgraph/context/mod.rs b/core/src/subgraph/context/mod.rs index ea42d2ff503..ef265978ede 100644 --- a/core/src/subgraph/context/mod.rs +++ b/core/src/subgraph/context/mod.rs @@ -6,7 +6,7 @@ use crate::polling_monitor::{ use anyhow::{self, Error}; use bytes::Bytes; use graph::{ - blockchain::{BlockTime, Blockchain}, + blockchain::{BlockTime, Blockchain, TriggerFilterWrapper}, components::{ store::{DeploymentId, SubgraphFork}, subgraph::{HostMetrics, MappingError, RuntimeHost as _, SharedProofOfIndexing}, @@ -77,7 +77,7 @@ where pub(crate) instance: SubgraphInstance, pub instances: SubgraphKeepAlive, pub offchain_monitor: OffchainMonitor, - pub filter: Option, + pub filter: Option>, pub(crate) trigger_processor: Box>, pub(crate) decoder: Box>, } diff --git a/core/src/subgraph/inputs.rs 
b/core/src/subgraph/inputs.rs index 02b20c089e3..ca52073ab06 100644 --- a/core/src/subgraph/inputs.rs +++ b/core/src/subgraph/inputs.rs @@ -1,7 +1,7 @@ use graph::{ - blockchain::{Blockchain, TriggersAdapter}, + blockchain::{block_stream::TriggersAdapterWrapper, Blockchain}, components::{ - store::{DeploymentLocator, SubgraphFork, WritableStore}, + store::{DeploymentLocator, SourceableStore, SubgraphFork, WritableStore}, subgraph::ProofOfIndexingVersion, }, data::subgraph::{SubgraphFeature, UnifiedMappingApiVersion}, @@ -16,11 +16,12 @@ pub struct IndexingInputs { pub features: BTreeSet, pub start_blocks: Vec, pub end_blocks: BTreeSet, + pub source_subgraph_stores: Vec>, pub stop_block: Option, pub max_end_block: Option, pub store: Arc, pub debug_fork: Option>, - pub triggers_adapter: Arc>, + pub triggers_adapter: Arc>, pub chain: Arc, pub templates: Arc>>, pub unified_api_version: UnifiedMappingApiVersion, @@ -40,6 +41,7 @@ impl IndexingInputs { features, start_blocks, end_blocks, + source_subgraph_stores, stop_block, max_end_block, store: _, @@ -58,6 +60,7 @@ impl IndexingInputs { features: features.clone(), start_blocks: start_blocks.clone(), end_blocks: end_blocks.clone(), + source_subgraph_stores: source_subgraph_stores.clone(), stop_block: stop_block.clone(), max_end_block: max_end_block.clone(), store, diff --git a/core/src/subgraph/instance_manager.rs b/core/src/subgraph/instance_manager.rs index b255e9b3a0b..2d54a90417c 100644 --- a/core/src/subgraph/instance_manager.rs +++ b/core/src/subgraph/instance_manager.rs @@ -9,9 +9,11 @@ use crate::subgraph::Decoder; use std::collections::BTreeSet; use crate::subgraph::runner::SubgraphRunner; -use graph::blockchain::block_stream::BlockStreamMetrics; +use graph::blockchain::block_stream::{BlockStreamMetrics, TriggersAdapterWrapper}; use graph::blockchain::{Blockchain, BlockchainKind, DataSource, NodeCapabilities}; use graph::components::metrics::gas::GasMetrics; +use 
graph::components::metrics::subgraph::DeploymentStatusMetric; +use graph::components::store::SourceableStore; use graph::components::subgraph::ProofOfIndexingVersion; use graph::data::subgraph::{UnresolvedSubgraphManifest, SPEC_VERSION_0_0_6}; use graph::data::value::Word; @@ -69,77 +71,91 @@ impl SubgraphInstanceManagerTrait for SubgraphInstanceManager< let err_logger = logger.clone(); let instance_manager = self.cheap_clone(); - let subgraph_start_future = async move { - match BlockchainKind::from_manifest(&manifest)? { - BlockchainKind::Arweave => { - let runner = instance_manager - .build_subgraph_runner::( - logger.clone(), - self.env_vars.cheap_clone(), - loc.clone(), - manifest, - stop_block, - Box::new(SubgraphTriggerProcessor {}), - ) - .await?; - - self.start_subgraph_inner(logger, loc, runner).await - } - BlockchainKind::Ethereum => { - let runner = instance_manager - .build_subgraph_runner::( - logger.clone(), - self.env_vars.cheap_clone(), - loc.clone(), - manifest, - stop_block, - Box::new(SubgraphTriggerProcessor {}), - ) - .await?; - - self.start_subgraph_inner(logger, loc, runner).await - } - BlockchainKind::Near => { - let runner = instance_manager - .build_subgraph_runner::( - logger.clone(), - self.env_vars.cheap_clone(), - loc.clone(), - manifest, - stop_block, - Box::new(SubgraphTriggerProcessor {}), - ) - .await?; - - self.start_subgraph_inner(logger, loc, runner).await - } - BlockchainKind::Cosmos => { - let runner = instance_manager - .build_subgraph_runner::( - logger.clone(), - self.env_vars.cheap_clone(), - loc.clone(), - manifest, - stop_block, - Box::new(SubgraphTriggerProcessor {}), - ) - .await?; - - self.start_subgraph_inner(logger, loc, runner).await - } - BlockchainKind::Substreams => { - let runner = instance_manager - .build_subgraph_runner::( - logger.clone(), - self.env_vars.cheap_clone(), - loc.cheap_clone(), - manifest, - stop_block, - Box::new(graph_chain_substreams::TriggerProcessor::new(loc.clone())), - ) - .await?; - - 
self.start_subgraph_inner(logger, loc, runner).await + let deployment_status_metric = self.new_deployment_status_metric(&loc); + deployment_status_metric.starting(); + + let subgraph_start_future = { + let deployment_status_metric = deployment_status_metric.clone(); + + async move { + match BlockchainKind::from_manifest(&manifest)? { + BlockchainKind::Arweave => { + let runner = instance_manager + .build_subgraph_runner::( + logger.clone(), + self.env_vars.cheap_clone(), + loc.clone(), + manifest, + stop_block, + Box::new(SubgraphTriggerProcessor {}), + deployment_status_metric, + ) + .await?; + + self.start_subgraph_inner(logger, loc, runner).await + } + BlockchainKind::Ethereum => { + let runner = instance_manager + .build_subgraph_runner::( + logger.clone(), + self.env_vars.cheap_clone(), + loc.clone(), + manifest, + stop_block, + Box::new(SubgraphTriggerProcessor {}), + deployment_status_metric, + ) + .await?; + + self.start_subgraph_inner(logger, loc, runner).await + } + BlockchainKind::Near => { + let runner = instance_manager + .build_subgraph_runner::( + logger.clone(), + self.env_vars.cheap_clone(), + loc.clone(), + manifest, + stop_block, + Box::new(SubgraphTriggerProcessor {}), + deployment_status_metric, + ) + .await?; + + self.start_subgraph_inner(logger, loc, runner).await + } + BlockchainKind::Cosmos => { + let runner = instance_manager + .build_subgraph_runner::( + logger.clone(), + self.env_vars.cheap_clone(), + loc.clone(), + manifest, + stop_block, + Box::new(SubgraphTriggerProcessor {}), + deployment_status_metric, + ) + .await?; + + self.start_subgraph_inner(logger, loc, runner).await + } + BlockchainKind::Substreams => { + let runner = instance_manager + .build_subgraph_runner::( + logger.clone(), + self.env_vars.cheap_clone(), + loc.cheap_clone(), + manifest, + stop_block, + Box::new(graph_chain_substreams::TriggerProcessor::new( + loc.clone(), + )), + deployment_status_metric, + ) + .await?; + + self.start_subgraph_inner(logger, loc, 
runner).await + } } } }; @@ -152,12 +168,16 @@ impl SubgraphInstanceManagerTrait for SubgraphInstanceManager< graph::spawn(async move { match subgraph_start_future.await { Ok(()) => {} - Err(err) => error!( - err_logger, - "Failed to start subgraph"; - "error" => format!("{:#}", err), - "code" => LogCode::SubgraphStartFailure - ), + Err(err) => { + deployment_status_metric.failed(); + + error!( + err_logger, + "Failed to start subgraph"; + "error" => format!("{:#}", err), + "code" => LogCode::SubgraphStartFailure + ); + } } }); } @@ -209,6 +229,30 @@ impl SubgraphInstanceManager { } } + pub async fn get_sourceable_stores( + &self, + hashes: Vec, + is_runner_test: bool, + ) -> anyhow::Result>> { + if is_runner_test { + return Ok(Vec::new()); + } + + let mut sourceable_stores = Vec::new(); + let subgraph_store = self.subgraph_store.clone(); + + for hash in hashes { + let loc = subgraph_store + .active_locator(&hash)? + .ok_or_else(|| anyhow!("no active deployment for hash {}", hash))?; + + let sourceable_store = subgraph_store.clone().sourceable(loc.id.clone()).await?; + sourceable_stores.push(sourceable_store); + } + + Ok(sourceable_stores) + } + pub async fn build_subgraph_runner( &self, logger: Logger, @@ -217,6 +261,35 @@ impl SubgraphInstanceManager { manifest: serde_yaml::Mapping, stop_block: Option, tp: Box>>, + deployment_status_metric: DeploymentStatusMetric, + ) -> anyhow::Result>> + where + C: Blockchain, + ::MappingTrigger: ToAscPtr, + { + self.build_subgraph_runner_inner( + logger, + env_vars, + deployment, + manifest, + stop_block, + tp, + deployment_status_metric, + false, + ) + .await + } + + pub async fn build_subgraph_runner_inner( + &self, + logger: Logger, + env_vars: Arc, + deployment: DeploymentLocator, + manifest: serde_yaml::Mapping, + stop_block: Option, + tp: Box>>, + deployment_status_metric: DeploymentStatusMetric, + is_runner_test: bool, ) -> anyhow::Result>> where C: Blockchain, @@ -314,6 +387,16 @@ impl SubgraphInstanceManager { 
.filter_map(|d| d.as_onchain().cloned()) .collect::>(); + let subgraph_data_sources = data_sources + .iter() + .filter_map(|d| d.as_subgraph()) + .collect::>(); + + let subgraph_ds_source_deployments = subgraph_data_sources + .iter() + .map(|d| d.source.address()) + .collect::>(); + let required_capabilities = C::NodeCapabilities::from_data_sources(&onchain_data_sources); let network: Word = manifest.network_name().into(); @@ -325,7 +408,7 @@ impl SubgraphInstanceManager { let start_blocks: Vec = data_sources .iter() - .filter_map(|d| d.as_onchain().map(|d: &C::DataSource| d.start_block())) + .filter_map(|d| d.start_block()) .collect(); let end_blocks: BTreeSet = manifest @@ -387,6 +470,7 @@ impl SubgraphInstanceManager { registry.cheap_clone(), deployment.hash.as_str(), stopwatch_metrics.clone(), + deployment_status_metric, )); let block_stream_metrics = Arc::new(BlockStreamMetrics::new( @@ -432,11 +516,21 @@ impl SubgraphInstanceManager { let decoder = Box::new(Decoder::new(decoder_hook)); + let subgraph_data_source_stores = self + .get_sourceable_stores::(subgraph_ds_source_deployments, is_runner_test) + .await?; + + let triggers_adapter = Arc::new(TriggersAdapterWrapper::new( + triggers_adapter, + subgraph_data_source_stores.clone(), + )); + let inputs = IndexingInputs { deployment: deployment.clone(), features, start_blocks, end_blocks, + source_subgraph_stores: subgraph_data_source_stores, stop_block, max_end_block, store, @@ -496,7 +590,7 @@ impl SubgraphInstanceManager { ::MappingTrigger: ToAscPtr, { let registry = self.metrics_registry.cheap_clone(); - let subgraph_metrics_unregister = runner.metrics.subgraph.cheap_clone(); + let subgraph_metrics = runner.metrics.subgraph.cheap_clone(); // Keep restarting the subgraph until it terminates. The subgraph // will usually only run once, but is restarted whenever a block @@ -513,7 +607,9 @@ impl SubgraphInstanceManager { // https://github.com/tokio-rs/tokio/issues/3493. 
graph::spawn_thread(deployment.to_string(), move || { match graph::block_on(task::unconstrained(runner.run())) { - Ok(()) => {} + Ok(()) => { + subgraph_metrics.deployment_status.stopped(); + } Err(SubgraphRunnerError::Duplicate) => { // We do not need to unregister metrics because they are unique per subgraph // and another runner is still active. @@ -521,12 +617,20 @@ impl SubgraphInstanceManager { } Err(err) => { error!(&logger, "Subgraph instance failed to run: {:#}", err); + subgraph_metrics.deployment_status.failed(); } } - subgraph_metrics_unregister.unregister(registry); + subgraph_metrics.unregister(registry); }); Ok(()) } + + pub fn new_deployment_status_metric( + &self, + deployment: &DeploymentLocator, + ) -> DeploymentStatusMetric { + DeploymentStatusMetric::register(&self.metrics_registry, deployment) + } } diff --git a/core/src/subgraph/registrar.rs b/core/src/subgraph/registrar.rs index b7d45613b74..258f4bbcd15 100644 --- a/core/src/subgraph/registrar.rs +++ b/core/src/subgraph/registrar.rs @@ -611,7 +611,6 @@ async fn create_subgraph_version( ) .map_err(SubgraphRegistrarError::ResolveError) .await?; - // Determine if the graft_base should be validated. // Validate the graft_base if there is a pending graft, ensuring its presence. // If the subgraph is new (indicated by DeploymentNotFound), the graft_base should be validated. 
diff --git a/core/src/subgraph/runner.rs b/core/src/subgraph/runner.rs index fe1ac3a020b..1e0fa2d4c8e 100644 --- a/core/src/subgraph/runner.rs +++ b/core/src/subgraph/runner.rs @@ -7,7 +7,10 @@ use atomic_refcell::AtomicRefCell; use graph::blockchain::block_stream::{ BlockStreamError, BlockStreamEvent, BlockWithTriggers, FirehoseCursor, }; -use graph::blockchain::{Block, BlockTime, Blockchain, DataSource as _, TriggerFilter as _}; +use graph::blockchain::{ + Block, BlockTime, Blockchain, DataSource as _, SubgraphFilter, Trigger, TriggerFilter as _, + TriggerFilterWrapper, +}; use graph::components::store::{EmptyStore, GetScope, ReadStore, StoredDynamicDataSource}; use graph::components::subgraph::InstanceDSTemplate; use graph::components::{ @@ -30,6 +33,7 @@ use graph::schema::EntityKey; use graph::util::{backoff::ExponentialBackoff, lfu_cache::LfuCache}; use std::sync::Arc; use std::time::{Duration, Instant}; +use std::vec; const MINUTE: Duration = Duration::from_secs(60); @@ -125,7 +129,7 @@ where self.inputs.static_filters || self.ctx.hosts_len() > ENV_VARS.static_filters_threshold } - fn build_filter(&self) -> C::TriggerFilter { + fn build_filter(&self) -> TriggerFilterWrapper { let current_ptr = self.inputs.store.block_ptr(); let static_filters = self.is_static_filters_enabled(); @@ -137,10 +141,31 @@ where None => true, }; + let data_sources = self.ctx.static_data_sources(); + + let subgraph_filter = data_sources + .iter() + .filter_map(|ds| ds.as_subgraph()) + .map(|ds| SubgraphFilter { + subgraph: ds.source.address(), + start_block: ds.source.start_block, + entities: ds + .mapping + .handlers + .iter() + .map(|handler| handler.entity.clone()) + .collect(), + manifest_idx: ds.manifest_idx, + }) + .collect::>(); + // if static_filters is not enabled we just stick to the filter based on all the data sources. 
if !static_filters { - return C::TriggerFilter::from_data_sources( - self.ctx.onchain_data_sources().filter(end_block_filter), + return TriggerFilterWrapper::new( + C::TriggerFilter::from_data_sources( + self.ctx.onchain_data_sources().filter(end_block_filter), + ), + subgraph_filter, ); } @@ -167,11 +192,11 @@ where filter.extend_with_template(templates.iter().filter_map(|ds| ds.as_onchain()).cloned()); - filter + TriggerFilterWrapper::new(filter, subgraph_filter) } #[cfg(debug_assertions)] - pub fn build_filter_for_test(&self) -> C::TriggerFilter { + pub fn build_filter_for_test(&self) -> TriggerFilterWrapper { self.build_filter() } @@ -180,6 +205,8 @@ where } async fn run_inner(mut self, break_on_restart: bool) -> Result { + self.update_deployment_synced_metric(); + // If a subgraph failed for deterministic reasons, before start indexing, we first // revert the deployment head. It should lead to the same result since the error was // deterministic. @@ -229,7 +256,7 @@ where let mut block_stream = new_block_stream( &self.inputs, - self.ctx.filter.as_ref().unwrap(), // Safe to unwrap as we just called `build_filter` in the previous line + self.ctx.filter.clone().unwrap(), // Safe to unwrap as we just called `build_filter` in the previous line &self.metrics.subgraph, ) .await? @@ -244,6 +271,8 @@ where debug!(self.logger, "Starting block stream"); + self.metrics.subgraph.deployment_status.running(); + // Process events from the stream as long as no restart is needed loop { let event = { @@ -266,6 +295,8 @@ where res })?; + self.update_deployment_synced_metric(); + // It is possible that the subgraph was unassigned, but the runner was in // a retry delay state and did not observe the cancel signal. 
if block_stream_cancel_handle.is_canceled() { @@ -367,7 +398,10 @@ where .match_and_decode_many( &logger, &block, - triggers.into_iter().map(TriggerData::Onchain), + triggers.into_iter().map(|t| match t { + Trigger::Chain(t) => TriggerData::Onchain(t), + Trigger::Subgraph(t) => TriggerData::Subgraph(t), + }), hosts_filter, &self.metrics.subgraph, ) @@ -465,9 +499,12 @@ where let (data_sources, runtime_hosts) = self.create_dynamic_data_sources(block_state.drain_created_data_sources())?; - let filter = C::TriggerFilter::from_data_sources( - data_sources.iter().filter_map(DataSource::as_onchain), - ); + let filter = &Arc::new(TriggerFilterWrapper::new( + C::TriggerFilter::from_data_sources( + data_sources.iter().filter_map(DataSource::as_onchain), + ), + vec![], + )); let block: Arc = if self.inputs.chain.is_refetch_block_required() { let cur = firehose_cursor.clone(); @@ -496,7 +533,7 @@ where let block_with_triggers = self .inputs .triggers_adapter - .triggers_in_block(&logger, block.as_ref().clone(), &filter) + .triggers_in_block(&logger, block.as_ref().clone(), filter) .await?; let triggers = block_with_triggers.trigger_data; @@ -526,7 +563,10 @@ where .match_and_decode_many( &logger, &block, - triggers.into_iter().map(TriggerData::Onchain), + triggers.into_iter().map(|t| match t { + Trigger::Chain(t) => TriggerData::Onchain(t), + Trigger::Subgraph(_) => unreachable!(), // TODO(krishna): Re-evaulate this + }), |_| Box::new(runtime_hosts.iter().map(Arc::as_ref)), &self.metrics.subgraph, ) @@ -876,7 +916,7 @@ where self.state.should_try_unfail_non_deterministic = false; if let UnfailOutcome::Unfailed = outcome { - self.metrics.stream.deployment_failed.set(0.0); + self.metrics.subgraph.deployment_status.running(); self.state.backoff.reset(); } } @@ -909,7 +949,7 @@ where // Handle unexpected stream errors by marking the subgraph as failed. 
Err(e) => { - self.metrics.stream.deployment_failed.set(1.0); + self.metrics.subgraph.deployment_status.failed(); let last_good_block = self .inputs .store @@ -1055,15 +1095,18 @@ where .stream .stopwatch .start_section(PROCESS_WASM_BLOCK_SECTION_NAME); - self.handle_process_wasm_block( - block_ptr, - block_time, - data, - handler, - cursor, - cancel_handle, - ) - .await? + let res = self + .handle_process_wasm_block( + block_ptr.clone(), + block_time, + data, + handler, + cursor, + cancel_handle, + ) + .await; + let start = Instant::now(); + self.handle_action(start, block_ptr, res).await? } Some(Ok(BlockStreamEvent::ProcessBlock(block, cursor))) => { let _section = self @@ -1192,6 +1235,13 @@ where Ok((mods, processed_data_sources, persisted_data_sources)) } + + fn update_deployment_synced_metric(&self) { + self.metrics + .subgraph + .deployment_synced + .record(self.inputs.store.is_deployment_synced()); + } } #[derive(Debug)] @@ -1221,7 +1271,7 @@ trait StreamEventHandler { handler: String, cursor: FirehoseCursor, cancel_handle: &CancelHandle, - ) -> Result; + ) -> Result; async fn handle_process_block( &mut self, block: BlockWithTriggers, @@ -1255,7 +1305,7 @@ where handler: String, cursor: FirehoseCursor, cancel_handle: &CancelHandle, - ) -> Result { + ) -> Result { let logger = self.logger.new(o!( "block_number" => format!("{:?}", block_ptr.number), "block_hash" => format!("{}", block_ptr.hash) @@ -1632,6 +1682,7 @@ async fn update_proof_of_indexing( key: EntityKey, digest: Bytes, block_time: BlockTime, + block: BlockNumber, ) -> Result<(), Error> { let digest_name = entity_cache.schema.poi_digest(); let mut data = vec![ @@ -1646,11 +1697,12 @@ async fn update_proof_of_indexing( data.push((entity_cache.schema.poi_block_time(), block_time)); } let poi = entity_cache.make_entity(data)?; - entity_cache.set(key, poi, None) + entity_cache.set(key, poi, block, None) } let _section_guard = stopwatch.start_section("update_proof_of_indexing"); + let block_number = 
proof_of_indexing.get_block(); let mut proof_of_indexing = proof_of_indexing.take(); for (causality_region, stream) in proof_of_indexing.drain() { @@ -1686,6 +1738,7 @@ async fn update_proof_of_indexing( entity_key, updated_proof_of_indexing, block_time, + block_number, )?; } diff --git a/core/src/subgraph/stream.rs b/core/src/subgraph/stream.rs index c1d767e3fcf..5547543f13d 100644 --- a/core/src/subgraph/stream.rs +++ b/core/src/subgraph/stream.rs @@ -1,13 +1,13 @@ use crate::subgraph::inputs::IndexingInputs; use anyhow::bail; use graph::blockchain::block_stream::{BlockStream, BufferedBlockStream}; -use graph::blockchain::Blockchain; +use graph::blockchain::{Blockchain, TriggerFilterWrapper}; use graph::prelude::{CheapClone, Error, SubgraphInstanceMetrics}; use std::sync::Arc; pub async fn new_block_stream( inputs: &IndexingInputs, - filter: &C::TriggerFilter, + filter: TriggerFilterWrapper, metrics: &SubgraphInstanceMetrics, ) -> Result>, Error> { let is_firehose = inputs.chain.chain_client().is_firehose(); @@ -18,6 +18,7 @@ pub async fn new_block_stream( inputs.deployment.clone(), inputs.store.cheap_clone(), inputs.start_blocks.clone(), + inputs.source_subgraph_stores.clone(), Arc::new(filter.clone()), inputs.unified_api_version.clone(), ) diff --git a/docs/environment-variables.md b/docs/environment-variables.md index 1217be769aa..5d2b501f2a6 100644 --- a/docs/environment-variables.md +++ b/docs/environment-variables.md @@ -266,3 +266,9 @@ those. 
- `GRAPH_POSTPONE_ATTRIBUTE_INDEX_CREATION`: During the coping of a subgraph postponing creation of certain indexes (btree, attribute based ones), would speed up syncing +- `GRAPH_STORE_INSERT_EXTRA_COLS`: Makes it possible to work around bugs in + the subgraph writing code that manifest as Postgres errors saying 'number + of parameters must be between 0 and 65535' Such errors are always + graph-node bugs, but since it is hard to work around them, setting this + variable to something like 10 makes it possible to work around such a bug + while it is being fixed (default: 0) diff --git a/graph/src/blockchain/block_stream.rs b/graph/src/blockchain/block_stream.rs index 0daf4c33eda..b9f602d802c 100644 --- a/graph/src/blockchain/block_stream.rs +++ b/graph/src/blockchain/block_stream.rs @@ -1,3 +1,5 @@ +use crate::blockchain::SubgraphFilter; +use crate::data_source::{subgraph, CausalityRegion}; use crate::substreams::Clock; use crate::substreams_rpc::response::Message as SubstreamsMessage; use crate::substreams_rpc::BlockScopedData; @@ -5,6 +7,7 @@ use anyhow::Error; use async_stream::stream; use futures03::Stream; use prost_types::Any; +use std::collections::{BTreeMap, BTreeSet, HashMap}; use std::fmt; use std::sync::Arc; use std::time::Instant; @@ -12,13 +15,13 @@ use thiserror::Error; use tokio::sync::mpsc::{self, Receiver, Sender}; use super::substreams_block_stream::SubstreamsLogData; -use super::{Block, BlockPtr, BlockTime, Blockchain}; +use super::{Block, BlockPtr, BlockTime, Blockchain, Trigger, TriggerFilterWrapper}; use crate::anyhow::Result; -use crate::components::store::{BlockNumber, DeploymentLocator}; +use crate::components::store::{BlockNumber, DeploymentLocator, SourceableStore}; use crate::data::subgraph::UnifiedMappingApiVersion; use crate::firehose::{self, FirehoseEndpoint}; use crate::futures03::stream::StreamExt as _; -use crate::schema::InputSchema; +use crate::schema::{EntityType, InputSchema}; use crate::substreams_rpc::response::Message; use 
crate::{prelude::*, prometheus::labels}; @@ -144,10 +147,33 @@ pub trait BlockStreamBuilder: Send + Sync { chain: &C, deployment: DeploymentLocator, start_blocks: Vec, + source_subgraph_stores: Vec>, subgraph_current_block: Option, - filter: Arc, + filter: Arc>, unified_api_version: UnifiedMappingApiVersion, ) -> Result>>; + + async fn build_subgraph_block_stream( + &self, + chain: &C, + deployment: DeploymentLocator, + start_blocks: Vec, + source_subgraph_stores: Vec>, + subgraph_current_block: Option, + filter: Arc>, + unified_api_version: UnifiedMappingApiVersion, + ) -> Result>> { + self.build_polling( + chain, + deployment, + start_blocks, + source_subgraph_stores, + subgraph_current_block, + filter, + unified_api_version, + ) + .await + } } #[derive(Debug, Clone)] @@ -198,7 +224,7 @@ impl AsRef> for FirehoseCursor { #[derive(Debug)] pub struct BlockWithTriggers { pub block: C::Block, - pub trigger_data: Vec, + pub trigger_data: Vec>, } impl Clone for BlockWithTriggers @@ -216,7 +242,31 @@ where impl BlockWithTriggers { /// Creates a BlockWithTriggers structure, which holds /// the trigger data ordered and without any duplicates. - pub fn new(block: C::Block, mut trigger_data: Vec, logger: &Logger) -> Self { + pub fn new(block: C::Block, trigger_data: Vec, logger: &Logger) -> Self { + Self::new_with_triggers( + block, + trigger_data.into_iter().map(Trigger::Chain).collect(), + logger, + ) + } + + pub fn new_with_subgraph_triggers( + block: C::Block, + trigger_data: Vec, + logger: &Logger, + ) -> Self { + Self::new_with_triggers( + block, + trigger_data.into_iter().map(Trigger::Subgraph).collect(), + logger, + ) + } + + fn new_with_triggers( + block: C::Block, + mut trigger_data: Vec>, + logger: &Logger, + ) -> Self { // This is where triggers get sorted. 
trigger_data.sort(); @@ -256,6 +306,289 @@ impl BlockWithTriggers { pub fn parent_ptr(&self) -> Option { self.block.parent_ptr() } + + pub fn extend_triggers(&mut self, triggers: Vec>) { + self.trigger_data.extend(triggers); + self.trigger_data.sort(); + } +} + +/// The `TriggersAdapterWrapper` wraps the chain-specific `TriggersAdapter`, enabling chain-agnostic +/// handling of subgraph datasource triggers. Without this wrapper, we would have to duplicate the same +/// logic for each chain, increasing code repetition. +pub struct TriggersAdapterWrapper { + pub adapter: Arc>, + pub source_subgraph_stores: HashMap>, +} + +impl TriggersAdapterWrapper { + pub fn new( + adapter: Arc>, + source_subgraph_stores: Vec>, + ) -> Self { + let stores_map: HashMap<_, _> = source_subgraph_stores + .iter() + .map(|store| (store.input_schema().id().clone(), store.clone())) + .collect(); + Self { + adapter, + source_subgraph_stores: stores_map, + } + } + + pub async fn blocks_with_subgraph_triggers( + &self, + logger: &Logger, + filters: &[SubgraphFilter], + range: SubgraphTriggerScanRange, + ) -> Result>, Error> { + if filters.is_empty() { + return Err(anyhow!("No subgraph filters provided")); + } + + let (blocks, hash_to_entities) = match range { + SubgraphTriggerScanRange::Single(block) => { + let hash_to_entities = self + .fetch_entities_for_filters(filters, block.number(), block.number()) + .await?; + + (vec![block], hash_to_entities) + } + SubgraphTriggerScanRange::Range(from, to) => { + let hash_to_entities = self.fetch_entities_for_filters(filters, from, to).await?; + + // Get block numbers that have entities + let mut block_numbers: BTreeSet<_> = hash_to_entities + .iter() + .flat_map(|(_, entities, _)| entities.keys().copied()) + .collect(); + + // Always include the last block in the range + block_numbers.insert(to); + + let blocks = self + .adapter + .load_block_ptrs_by_numbers(logger.clone(), block_numbers) + .await?; + + (blocks, hash_to_entities) + } + }; + + 
create_subgraph_triggers::(logger.clone(), blocks, hash_to_entities).await + } + + async fn fetch_entities_for_filters( + &self, + filters: &[SubgraphFilter], + from: BlockNumber, + to: BlockNumber, + ) -> Result< + Vec<( + DeploymentHash, + BTreeMap>, + u32, + )>, + Error, + > { + let futures = filters + .iter() + .filter_map(|filter| { + self.source_subgraph_stores + .get(&filter.subgraph) + .map(|store| { + let store = store.clone(); + let schema = store.input_schema(); + + async move { + let entities = + get_entities_for_range(&store, filter, &schema, from, to).await?; + Ok::<_, Error>((filter.subgraph.clone(), entities, filter.manifest_idx)) + } + }) + }) + .collect::>(); + + if futures.is_empty() { + return Ok(Vec::new()); + } + + futures03::future::try_join_all(futures).await + } +} + +fn create_subgraph_trigger_from_entities( + subgraph: &DeploymentHash, + entities: Vec, + manifest_idx: u32, +) -> Vec { + entities + .into_iter() + .map(|entity| subgraph::TriggerData { + source: subgraph.clone(), + entity, + source_idx: manifest_idx, + }) + .collect() +} + +async fn create_subgraph_triggers( + logger: Logger, + blocks: Vec, + subgraph_data: Vec<( + DeploymentHash, + BTreeMap>, + u32, + )>, +) -> Result>, Error> { + let logger_clone = logger.cheap_clone(); + let blocks: Vec> = blocks + .into_iter() + .map(|block| { + let block_number = block.number(); + let mut all_trigger_data = Vec::new(); + + for (hash, entities, manifest_idx) in subgraph_data.iter() { + if let Some(block_entities) = entities.get(&block_number) { + let trigger_data = create_subgraph_trigger_from_entities( + hash, + block_entities.clone(), + *manifest_idx, + ); + all_trigger_data.extend(trigger_data); + } + } + + BlockWithTriggers::new_with_subgraph_triggers(block, all_trigger_data, &logger_clone) + }) + .collect(); + + Ok(blocks) +} + +pub enum SubgraphTriggerScanRange { + Single(C::Block), + Range(BlockNumber, BlockNumber), +} + +#[derive(Debug, Clone, Eq, PartialEq)] +pub enum 
EntityOperationKind { + Create, + Modify, + Delete, +} + +#[derive(Debug, Clone, Eq, PartialEq)] +pub struct EntitySourceOperation { + pub entity_op: EntityOperationKind, + pub entity_type: EntityType, + pub entity: Entity, + pub vid: i64, +} + +async fn get_entities_for_range( + store: &Arc, + filter: &SubgraphFilter, + schema: &InputSchema, + from: BlockNumber, + to: BlockNumber, +) -> Result>, Error> { + let entity_types: Result> = filter + .entities + .iter() + .map(|name| schema.entity_type(name)) + .collect(); + Ok(store.get_range(entity_types?, CausalityRegion::ONCHAIN, from..to)?) +} + +impl TriggersAdapterWrapper { + pub async fn ancestor_block( + &self, + ptr: BlockPtr, + offset: BlockNumber, + root: Option, + ) -> Result, Error> { + self.adapter.ancestor_block(ptr, offset, root).await + } + + pub async fn scan_triggers( + &self, + logger: &Logger, + from: BlockNumber, + to: BlockNumber, + filter: &Arc>, + ) -> Result<(Vec>, BlockNumber), Error> { + if !filter.subgraph_filter.is_empty() { + let blocks_with_triggers = self + .blocks_with_subgraph_triggers( + logger, + &filter.subgraph_filter, + SubgraphTriggerScanRange::Range(from, to), + ) + .await?; + + return Ok((blocks_with_triggers, to)); + } + + self.adapter + .scan_triggers(from, to, &filter.chain_filter) + .await + } + + pub async fn triggers_in_block( + &self, + logger: &Logger, + block: C::Block, + filter: &Arc>, + ) -> Result, Error> { + trace!( + logger, + "triggers_in_block"; + "block_number" => block.number(), + "block_hash" => block.hash().hash_hex(), + ); + + if !filter.subgraph_filter.is_empty() { + let blocks_with_triggers = self + .blocks_with_subgraph_triggers( + logger, + &filter.subgraph_filter, + SubgraphTriggerScanRange::Single(block), + ) + .await?; + + return Ok(blocks_with_triggers.into_iter().next().unwrap()); + } + + self.adapter + .triggers_in_block(logger, block, &filter.chain_filter) + .await + } + + pub async fn is_on_main_chain(&self, ptr: BlockPtr) -> Result { + 
self.adapter.is_on_main_chain(ptr).await + } + + pub async fn parent_ptr(&self, block: &BlockPtr) -> Result, Error> { + self.adapter.parent_ptr(block).await + } + + pub async fn chain_head_ptr(&self) -> Result, Error> { + if self.source_subgraph_stores.is_empty() { + return self.adapter.chain_head_ptr().await; + } + + let ptrs = futures03::future::try_join_all( + self.source_subgraph_stores + .iter() + .map(|(_, store)| store.block_ptr()), + ) + .await?; + + let min_ptr = ptrs.into_iter().flatten().min_by_key(|ptr| ptr.number); + + Ok(min_ptr) + } } #[async_trait] @@ -298,6 +631,15 @@ pub trait TriggersAdapter: Send + Sync { /// Get pointer to parent of `block`. This is called when reverting `block`. async fn parent_ptr(&self, block: &BlockPtr) -> Result, Error>; + + /// Get pointer to parent of `block`. This is called when reverting `block`. + async fn chain_head_ptr(&self) -> Result, Error>; + + async fn load_block_ptrs_by_numbers( + &self, + logger: Logger, + block_numbers: BTreeSet, + ) -> Result>; } #[async_trait] @@ -573,7 +915,6 @@ where #[derive(Clone)] pub struct BlockStreamMetrics { pub deployment_head: Box, - pub deployment_failed: Box, pub reverted_blocks: Gauge, pub stopwatch: StopwatchMetrics, } @@ -605,16 +946,8 @@ impl BlockStreamMetrics { labels.clone(), ) .expect("failed to create `deployment_head` gauge"); - let deployment_failed = registry - .new_gauge( - "deployment_failed", - "Boolean gauge to indicate whether the deployment has failed (1 == failed)", - labels, - ) - .expect("failed to create `deployment_failed` gauge"); Self { deployment_head, - deployment_failed, reverted_blocks, stopwatch, } diff --git a/graph/src/blockchain/client.rs b/graph/src/blockchain/client.rs index 8d83536b577..1ac1b4f892c 100644 --- a/graph/src/blockchain/client.rs +++ b/graph/src/blockchain/client.rs @@ -41,7 +41,7 @@ impl ChainClient { pub fn rpc(&self) -> anyhow::Result<&C::Client> { match self { Self::Rpc(rpc) => Ok(rpc), - _ => Err(anyhow!("rpc endpoint 
requested on firehose chain client")), + Self::Firehose(_) => Err(anyhow!("rpc endpoint requested on firehose chain client")), } } } diff --git a/graph/src/blockchain/mock.rs b/graph/src/blockchain/mock.rs index c89eca95727..430eb27bd85 100644 --- a/graph/src/blockchain/mock.rs +++ b/graph/src/blockchain/mock.rs @@ -2,22 +2,34 @@ use crate::{ bail, components::{ link_resolver::LinkResolver, - store::{BlockNumber, DeploymentCursorTracker, DeploymentLocator}, + store::{BlockNumber, DeploymentCursorTracker, DeploymentLocator, SourceableStore}, subgraph::InstanceDSTemplateInfo, }, data::subgraph::UnifiedMappingApiVersion, - prelude::{BlockHash, DataSourceTemplateInfo}, + data_source, + prelude::{ + transaction_receipt::LightTransactionReceipt, BlockHash, ChainStore, + DataSourceTemplateInfo, StoreError, + }, }; -use anyhow::Error; +use anyhow::{Error, Result}; use async_trait::async_trait; use serde::Deserialize; -use std::{collections::HashSet, convert::TryFrom, sync::Arc}; +use serde_json::Value; +use slog::Logger; +use std::{ + collections::{BTreeMap, BTreeSet, HashMap, HashSet}, + convert::TryFrom, + sync::Arc, +}; +use web3::types::H256; use super::{ block_stream::{self, BlockStream, FirehoseCursor}, client::ChainClient, - BlockIngestor, BlockTime, EmptyNodeCapabilities, HostFn, IngestorError, MappingTriggerTrait, - NoopDecoderHook, TriggerWithHandler, + BlockIngestor, BlockTime, ChainIdentifier, EmptyNodeCapabilities, ExtendedBlockPtr, HostFn, + IngestorError, MappingTriggerTrait, NoopDecoderHook, Trigger, TriggerFilterWrapper, + TriggerWithHandler, }; use super::{ @@ -36,15 +48,32 @@ pub struct MockBlock { impl Block for MockBlock { fn ptr(&self) -> BlockPtr { - todo!() + test_ptr(self.number as i32) } fn parent_ptr(&self) -> Option { - todo!() + if self.number == 0 { + None + } else { + Some(test_ptr(self.number as i32 - 1)) + } } fn timestamp(&self) -> BlockTime { - todo!() + BlockTime::since_epoch(self.ptr().number as i64 * 45 * 60, 0) + } +} + +pub fn 
test_ptr(n: BlockNumber) -> BlockPtr { + test_ptr_reorged(n, 0) +} + +pub fn test_ptr_reorged(n: BlockNumber, reorg_n: u32) -> BlockPtr { + let mut hash = H256::from_low_u64_be(n as u64); + hash[0..4].copy_from_slice(&reorg_n.to_be_bytes()); + BlockPtr { + hash: hash.into(), + number: n, } } @@ -218,31 +247,54 @@ impl UnresolvedDataSourceTemplate for MockUnresolvedDataSource pub struct MockTriggersAdapter; #[async_trait] -impl TriggersAdapter for MockTriggersAdapter { +impl TriggersAdapter for MockTriggersAdapter { async fn ancestor_block( &self, _ptr: BlockPtr, _offset: BlockNumber, _root: Option, - ) -> Result, Error> { + ) -> Result, Error> { todo!() } + async fn load_block_ptrs_by_numbers( + &self, + _logger: Logger, + block_numbers: BTreeSet, + ) -> Result> { + Ok(block_numbers + .into_iter() + .map(|number| MockBlock { + number: number as u64, + }) + .collect()) + } + + async fn chain_head_ptr(&self) -> Result, Error> { + unimplemented!() + } + async fn scan_triggers( &self, - _from: crate::components::store::BlockNumber, - _to: crate::components::store::BlockNumber, - _filter: &C::TriggerFilter, - ) -> Result<(Vec>, BlockNumber), Error> { - todo!() + from: crate::components::store::BlockNumber, + to: crate::components::store::BlockNumber, + filter: &MockTriggerFilter, + ) -> Result< + ( + Vec>, + BlockNumber, + ), + Error, + > { + blocks_with_triggers(from, to, filter).await } async fn triggers_in_block( &self, _logger: &slog::Logger, - _block: C::Block, - _filter: &C::TriggerFilter, - ) -> Result, Error> { + _block: MockBlock, + _filter: &MockTriggerFilter, + ) -> Result, Error> { todo!() } @@ -255,6 +307,26 @@ impl TriggersAdapter for MockTriggersAdapter { } } +async fn blocks_with_triggers( + _from: crate::components::store::BlockNumber, + to: crate::components::store::BlockNumber, + _filter: &MockTriggerFilter, +) -> Result< + ( + Vec>, + BlockNumber, + ), + Error, +> { + Ok(( + vec![BlockWithTriggers { + block: MockBlock { number: 0 }, + trigger_data: 
vec![Trigger::Chain(MockTriggerData)], + }], + to, + )) +} + #[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)] pub struct MockTriggerData; @@ -303,7 +375,7 @@ impl TriggerFilter for MockTriggerFilter { pub struct MockRuntimeAdapter; impl RuntimeAdapter for MockRuntimeAdapter { - fn host_fns(&self, _ds: &C::DataSource) -> Result, Error> { + fn host_fns(&self, _ds: &data_source::DataSource) -> Result, Error> { todo!() } } @@ -347,7 +419,8 @@ impl Blockchain for MockBlockchain { _deployment: DeploymentLocator, _store: impl DeploymentCursorTracker, _start_blocks: Vec, - _filter: Arc, + _source_subgraph_stores: Vec>, + _filter: Arc>, _unified_api_version: UnifiedMappingApiVersion, ) -> Result>, Error> { todo!() @@ -391,3 +464,105 @@ impl Blockchain for MockBlockchain { todo!() } } + +// Mock implementation +#[derive(Default)] +pub struct MockChainStore { + pub blocks: BTreeMap>, +} + +#[async_trait] +impl ChainStore for MockChainStore { + async fn block_ptrs_by_numbers( + self: Arc, + numbers: Vec, + ) -> Result>, Error> { + let mut result = BTreeMap::new(); + for num in numbers { + if let Some(blocks) = self.blocks.get(&num) { + result.insert(num, blocks.clone()); + } + } + Ok(result) + } + + // Implement other required methods with minimal implementations + fn genesis_block_ptr(&self) -> Result { + unimplemented!() + } + async fn upsert_block(&self, _block: Arc) -> Result<(), Error> { + unimplemented!() + } + fn upsert_light_blocks(&self, _blocks: &[&dyn Block]) -> Result<(), Error> { + unimplemented!() + } + async fn attempt_chain_head_update( + self: Arc, + _ancestor_count: BlockNumber, + ) -> Result, Error> { + unimplemented!() + } + async fn chain_head_ptr(self: Arc) -> Result, Error> { + unimplemented!() + } + fn chain_head_cursor(&self) -> Result, Error> { + unimplemented!() + } + async fn set_chain_head( + self: Arc, + _block: Arc, + _cursor: String, + ) -> Result<(), Error> { + unimplemented!() + } + async fn blocks(self: Arc, _hashes: Vec) -> Result, 
Error> { + unimplemented!() + } + async fn ancestor_block( + self: Arc, + _block_ptr: BlockPtr, + _offset: BlockNumber, + _root: Option, + ) -> Result, Error> { + unimplemented!() + } + fn cleanup_cached_blocks( + &self, + _ancestor_count: BlockNumber, + ) -> Result, Error> { + unimplemented!() + } + fn block_hashes_by_block_number(&self, _number: BlockNumber) -> Result, Error> { + unimplemented!() + } + fn confirm_block_hash(&self, _number: BlockNumber, _hash: &BlockHash) -> Result { + unimplemented!() + } + async fn block_number( + &self, + _hash: &BlockHash, + ) -> Result, Option)>, StoreError> { + unimplemented!() + } + async fn block_numbers( + &self, + _hashes: Vec, + ) -> Result, StoreError> { + unimplemented!() + } + async fn transaction_receipts_in_block( + &self, + _block_ptr: &H256, + ) -> Result, StoreError> { + unimplemented!() + } + async fn clear_call_cache(&self, _from: BlockNumber, _to: BlockNumber) -> Result<(), Error> { + unimplemented!() + } + fn chain_identifier(&self) -> Result { + unimplemented!() + } + fn set_chain_identifier(&self, _ident: &ChainIdentifier) -> Result<(), Error> { + unimplemented!() + } +} diff --git a/graph/src/blockchain/mod.rs b/graph/src/blockchain/mod.rs index 9f3df60fe5f..2e3c60e88b0 100644 --- a/graph/src/blockchain/mod.rs +++ b/graph/src/blockchain/mod.rs @@ -19,13 +19,15 @@ use crate::{ cheap_clone::CheapClone, components::{ metrics::subgraph::SubgraphInstanceMetrics, - store::{DeploymentCursorTracker, DeploymentLocator, StoredDynamicDataSource}, + store::{ + DeploymentCursorTracker, DeploymentLocator, SourceableStore, StoredDynamicDataSource, + }, subgraph::{HostMetrics, InstanceDSTemplateInfo, MappingError}, trigger_processor::RunnableTriggers, }, data::subgraph::{UnifiedMappingApiVersion, MIN_SPEC_VERSION}, - data_source::{self, DataSourceTemplateInfo}, - prelude::DataSourceContext, + data_source::{self, subgraph, DataSourceTemplateInfo}, + prelude::{DataSourceContext, DeploymentHash}, runtime::{gas::GasCounter, 
AscHeap, HostExportError}, }; use crate::{ @@ -51,7 +53,7 @@ pub use block_stream::{ChainHeadUpdateListener, ChainHeadUpdateStream, TriggersA pub use builder::{BasicBlockchainBuilder, BlockchainBuilder}; pub use empty_node_capabilities::EmptyNodeCapabilities; pub use noop_runtime_adapter::NoopRuntimeAdapter; -pub use types::{BlockHash, BlockPtr, BlockTime, ChainIdentifier}; +pub use types::{BlockHash, BlockPtr, BlockTime, ChainIdentifier, ExtendedBlockPtr}; use self::{ block_stream::{BlockStream, FirehoseCursor}, @@ -189,7 +191,8 @@ pub trait Blockchain: Debug + Sized + Send + Sync + Unpin + 'static { deployment: DeploymentLocator, store: impl DeploymentCursorTracker, start_blocks: Vec, - filter: Arc, + source_subgraph_stores: Vec>, + filter: Arc>, unified_api_version: UnifiedMappingApiVersion, ) -> Result>, Error>; @@ -247,6 +250,43 @@ impl From for IngestorError { } } +/// The `TriggerFilterWrapper` is a higher-level wrapper around the chain-specific `TriggerFilter`, +/// enabling subgraph-based trigger filtering for subgraph datasources. This abstraction is necessary +/// because subgraph filtering operates at a higher level than chain-based filtering. By using this wrapper, +/// we reduce code duplication, allowing subgraph-based filtering to be implemented once, instead of +/// duplicating it across different chains. 
+#[derive(Debug)] +pub struct TriggerFilterWrapper { + pub chain_filter: Arc, + pub subgraph_filter: Vec, +} + +#[derive(Clone, Debug)] +pub struct SubgraphFilter { + pub subgraph: DeploymentHash, + pub start_block: BlockNumber, + pub entities: Vec, + pub manifest_idx: u32, +} + +impl TriggerFilterWrapper { + pub fn new(filter: C::TriggerFilter, subgraph_filter: Vec) -> Self { + Self { + chain_filter: Arc::new(filter), + subgraph_filter, + } + } +} + +impl Clone for TriggerFilterWrapper { + fn clone(&self) -> Self { + Self { + chain_filter: self.chain_filter.cheap_clone(), + subgraph_filter: self.subgraph_filter.clone(), + } + } +} + pub trait TriggerFilter: Default + Clone + Send + Sync { fn from_data_sources<'a>( data_sources: impl Iterator + Clone, @@ -370,6 +410,75 @@ pub trait UnresolvedDataSource: ) -> Result; } +#[derive(Debug)] +pub enum Trigger { + Chain(C::TriggerData), + Subgraph(subgraph::TriggerData), +} + +impl Trigger { + pub fn as_chain(&self) -> Option<&C::TriggerData> { + match self { + Trigger::Chain(data) => Some(data), + _ => None, + } + } + + pub fn as_subgraph(&self) -> Option<&subgraph::TriggerData> { + match self { + Trigger::Subgraph(data) => Some(data), + _ => None, + } + } +} + +impl Eq for Trigger where C::TriggerData: Eq {} + +impl PartialEq for Trigger +where + C::TriggerData: PartialEq, +{ + fn eq(&self, other: &Self) -> bool { + match (self, other) { + (Trigger::Chain(data1), Trigger::Chain(data2)) => data1 == data2, + (Trigger::Subgraph(a), Trigger::Subgraph(b)) => a == b, + _ => false, + } + } +} + +impl Clone for Trigger +where + C::TriggerData: Clone, +{ + fn clone(&self) -> Self { + match self { + Trigger::Chain(data) => Trigger::Chain(data.clone()), + Trigger::Subgraph(data) => Trigger::Subgraph(data.clone()), + } + } +} + +impl Ord for Trigger +where + C::TriggerData: Ord, +{ + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + match (self, other) { + (Trigger::Chain(data1), Trigger::Chain(data2)) => data1.cmp(data2), + 
(Trigger::Subgraph(_), Trigger::Chain(_)) => std::cmp::Ordering::Greater, + (Trigger::Chain(_), Trigger::Subgraph(_)) => std::cmp::Ordering::Less, + (Trigger::Subgraph(t1), Trigger::Subgraph(t2)) => t1.cmp(t2), + } + } +} + +impl PartialOrd for Trigger { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + pub trait TriggerData { /// If there is an error when processing this trigger, this will called to add relevant context. /// For example an useful return is: `"block # (), transaction ". @@ -436,7 +545,7 @@ pub struct HostFn { } pub trait RuntimeAdapter: Send + Sync { - fn host_fns(&self, ds: &C::DataSource) -> Result, Error>; + fn host_fns(&self, ds: &data_source::DataSource) -> Result, Error>; } pub trait NodeCapabilities { @@ -485,6 +594,7 @@ impl FromStr for BlockchainKind { "near" => Ok(BlockchainKind::Near), "cosmos" => Ok(BlockchainKind::Cosmos), "substreams" => Ok(BlockchainKind::Substreams), + "subgraph" => Ok(BlockchainKind::Ethereum), // TODO(krishna): We should detect the blockchain kind from the source subgraph _ => Err(anyhow!("unknown blockchain kind {}", s)), } } diff --git a/graph/src/blockchain/noop_runtime_adapter.rs b/graph/src/blockchain/noop_runtime_adapter.rs index 2f30a30e608..0b8b9e0707c 100644 --- a/graph/src/blockchain/noop_runtime_adapter.rs +++ b/graph/src/blockchain/noop_runtime_adapter.rs @@ -1,5 +1,7 @@ use std::marker::PhantomData; +use crate::data_source; + use super::{Blockchain, HostFn, RuntimeAdapter}; /// A [`RuntimeAdapter`] that does not expose any host functions. 
@@ -16,7 +18,7 @@ impl RuntimeAdapter for NoopRuntimeAdapter where C: Blockchain, { - fn host_fns(&self, _ds: &C::DataSource) -> anyhow::Result> { + fn host_fns(&self, _ds: &data_source::DataSource) -> anyhow::Result> { Ok(vec![]) } } diff --git a/graph/src/blockchain/polling_block_stream.rs b/graph/src/blockchain/polling_block_stream.rs index ce3fdf2a4ef..fa774261227 100644 --- a/graph/src/blockchain/polling_block_stream.rs +++ b/graph/src/blockchain/polling_block_stream.rs @@ -9,9 +9,9 @@ use std::time::Duration; use super::block_stream::{ BlockStream, BlockStreamError, BlockStreamEvent, BlockWithTriggers, ChainHeadUpdateStream, - FirehoseCursor, TriggersAdapter, BUFFERED_BLOCK_STREAM_SIZE, + FirehoseCursor, TriggersAdapterWrapper, BUFFERED_BLOCK_STREAM_SIZE, }; -use super::{Block, BlockPtr, Blockchain}; +use super::{Block, BlockPtr, Blockchain, TriggerFilterWrapper}; use crate::components::store::BlockNumber; use crate::data::subgraph::UnifiedMappingApiVersion; @@ -79,13 +79,13 @@ where C: Blockchain, { chain_store: Arc, - adapter: Arc>, + adapter: Arc>, node_id: NodeId, subgraph_id: DeploymentHash, // This is not really a block number, but the (unsigned) difference // between two block numbers reorg_threshold: BlockNumber, - filter: Arc, + filter: Arc>, start_blocks: Vec, logger: Logger, previous_triggers_per_block: f64, @@ -146,10 +146,10 @@ where pub fn new( chain_store: Arc, chain_head_update_stream: ChainHeadUpdateStream, - adapter: Arc>, + adapter: Arc>, node_id: NodeId, subgraph_id: DeploymentHash, - filter: Arc, + filter: Arc>, start_blocks: Vec, reorg_threshold: BlockNumber, logger: Logger, @@ -218,7 +218,7 @@ where let max_block_range_size = self.max_block_range_size; // Get pointers from database for comparison - let head_ptr_opt = ctx.chain_store.chain_head_ptr().await?; + let head_ptr_opt = ctx.adapter.chain_head_ptr().await?; let subgraph_ptr = self.current_block.clone(); // If chain head ptr is not set yet @@ -379,7 +379,10 @@ where ); // Update 
with actually scanned range, to account for any skipped null blocks. - let (blocks, to) = self.adapter.scan_triggers(from, to, &self.filter).await?; + let (blocks, to) = self + .adapter + .scan_triggers(&self.logger, from, to, &self.filter) + .await?; let range_size = to - from + 1; // If the target block (`to`) is within the reorg threshold, indicating no non-null finalized blocks are diff --git a/graph/src/blockchain/types.rs b/graph/src/blockchain/types.rs index 89c3e12039d..b2b802fbfac 100644 --- a/graph/src/blockchain/types.rs +++ b/graph/src/blockchain/types.rs @@ -5,10 +5,11 @@ use diesel::serialize::{Output, ToSql}; use diesel::sql_types::Timestamptz; use diesel::sql_types::{Bytea, Nullable, Text}; use diesel_derives::{AsExpression, FromSqlRow}; +use serde::{Deserialize, Deserializer}; use std::convert::TryFrom; use std::time::Duration; use std::{fmt, str::FromStr}; -use web3::types::{Block, H256}; +use web3::types::{Block, H256, U256, U64}; use crate::cheap_clone::CheapClone; use crate::components::store::BlockNumber; @@ -31,6 +32,10 @@ impl BlockHash { &self.0 } + pub fn as_h256(&self) -> H256 { + H256::from_slice(self.as_slice()) + } + /// Encodes the block hash into a hexadecimal string **without** a "0x" /// prefix. 
Hashes are stored in the database in this format when the /// schema uses `text` columns, which is a legacy and such columns @@ -44,6 +49,16 @@ impl BlockHash { } } +impl<'de> Deserialize<'de> for BlockHash { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let s: String = Deserialize::deserialize(deserializer)?; + BlockHash::from_str(&s).map_err(serde::de::Error::custom) + } +} + impl CheapClone for BlockHash { fn cheap_clone(&self) -> Self { Self(self.0.clone()) @@ -326,6 +341,204 @@ impl From for BlockNumber { } } +fn deserialize_block_number<'de, D>(deserializer: D) -> Result +where + D: Deserializer<'de>, +{ + let s: String = Deserialize::deserialize(deserializer)?; + + if s.starts_with("0x") { + let s = s.trim_start_matches("0x"); + i32::from_str_radix(s, 16).map_err(serde::de::Error::custom) + } else { + i32::from_str(&s).map_err(serde::de::Error::custom) + } +} + +fn deserialize_block_time<'de, D>(deserializer: D) -> Result +where + D: Deserializer<'de>, +{ + let value = String::deserialize(deserializer)?; + + if value.starts_with("0x") { + let hex_value = value.trim_start_matches("0x"); + + i64::from_str_radix(hex_value, 16) + .map(|secs| BlockTime::since_epoch(secs, 0)) + .map_err(serde::de::Error::custom) + } else { + value + .parse::() + .map(|secs| BlockTime::since_epoch(secs, 0)) + .map_err(serde::de::Error::custom) + } +} +#[derive(Clone, PartialEq, Eq, Hash, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct ExtendedBlockPtr { + pub hash: BlockHash, + #[serde(deserialize_with = "deserialize_block_number")] + pub number: BlockNumber, + pub parent_hash: BlockHash, + #[serde(deserialize_with = "deserialize_block_time")] + pub timestamp: BlockTime, +} + +impl ExtendedBlockPtr { + pub fn new( + hash: BlockHash, + number: BlockNumber, + parent_hash: BlockHash, + timestamp: BlockTime, + ) -> Self { + Self { + hash, + number, + parent_hash, + timestamp, + } + } + + /// Encodes the block hash into a hexadecimal 
string **without** a "0x" prefix. + /// Hashes are stored in the database in this format. + pub fn hash_hex(&self) -> String { + self.hash.hash_hex() + } + + /// Encodes the parent block hash into a hexadecimal string **without** a "0x" prefix. + pub fn parent_hash_hex(&self) -> String { + self.parent_hash.hash_hex() + } + + /// Block number to be passed into the store. Panics if it does not fit in an i32. + pub fn block_number(&self) -> BlockNumber { + self.number + } + + pub fn hash_as_h256(&self) -> H256 { + H256::from_slice(&self.hash_slice()[..32]) + } + + pub fn parent_hash_as_h256(&self) -> H256 { + H256::from_slice(&self.parent_hash_slice()[..32]) + } + + pub fn hash_slice(&self) -> &[u8] { + self.hash.0.as_ref() + } + + pub fn parent_hash_slice(&self) -> &[u8] { + self.parent_hash.0.as_ref() + } +} + +impl fmt::Display for ExtendedBlockPtr { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!( + f, + "#{} ({}) [parent: {}]", + self.number, + self.hash_hex(), + self.parent_hash_hex() + ) + } +} + +impl fmt::Debug for ExtendedBlockPtr { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!( + f, + "#{} ({}) [parent: {}]", + self.number, + self.hash_hex(), + self.parent_hash_hex() + ) + } +} + +impl slog::Value for ExtendedBlockPtr { + fn serialize( + &self, + record: &slog::Record, + key: slog::Key, + serializer: &mut dyn slog::Serializer, + ) -> slog::Result { + slog::Value::serialize(&self.to_string(), record, key, serializer) + } +} + +impl IntoValue for ExtendedBlockPtr { + fn into_value(self) -> r::Value { + object! 
{ + __typename: "Block", + hash: self.hash_hex(), + number: format!("{}", self.number), + parent_hash: self.parent_hash_hex(), + timestamp: format!("{}", self.timestamp), + } + } +} + +impl TryFrom<(Option, Option, H256, U256)> for ExtendedBlockPtr { + type Error = anyhow::Error; + + fn try_from(tuple: (Option, Option, H256, U256)) -> Result { + let (hash_opt, number_opt, parent_hash, timestamp_u256) = tuple; + + let hash = hash_opt.ok_or_else(|| anyhow!("Block hash is missing"))?; + let number = number_opt + .ok_or_else(|| anyhow!("Block number is missing"))? + .as_u64(); + + let block_number = + i32::try_from(number).map_err(|_| anyhow!("Block number out of range"))?; + + // Convert `U256` to `BlockTime` + let secs = + i64::try_from(timestamp_u256).map_err(|_| anyhow!("Timestamp out of range for i64"))?; + let block_time = BlockTime::since_epoch(secs, 0); + + Ok(ExtendedBlockPtr { + hash: hash.into(), + number: block_number, + parent_hash: parent_hash.into(), + timestamp: block_time, + }) + } +} + +impl TryFrom<(H256, i32, H256, U256)> for ExtendedBlockPtr { + type Error = anyhow::Error; + + fn try_from(tuple: (H256, i32, H256, U256)) -> Result { + let (hash, block_number, parent_hash, timestamp_u256) = tuple; + + // Convert `U256` to `BlockTime` + let secs = + i64::try_from(timestamp_u256).map_err(|_| anyhow!("Timestamp out of range for i64"))?; + let block_time = BlockTime::since_epoch(secs, 0); + + Ok(ExtendedBlockPtr { + hash: hash.into(), + number: block_number, + parent_hash: parent_hash.into(), + timestamp: block_time, + }) + } +} +impl From for H256 { + fn from(ptr: ExtendedBlockPtr) -> Self { + ptr.hash_as_h256() + } +} + +impl From for BlockNumber { + fn from(ptr: ExtendedBlockPtr) -> Self { + ptr.number + } +} + #[derive(Clone, Debug, PartialEq, Eq, Hash)] /// A collection of attributes that (kind of) uniquely identify a blockchain. 
pub struct ChainIdentifier { @@ -360,7 +573,9 @@ impl fmt::Display for ChainIdentifier { /// The timestamp associated with a block. This is used whenever a time /// needs to be connected to data within the block -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord, FromSqlRow, AsExpression)] +#[derive( + Debug, Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord, FromSqlRow, AsExpression, Deserialize, +)] #[diesel(sql_type = Timestamptz)] pub struct BlockTime(Timestamp); @@ -441,3 +656,80 @@ impl FromSql for BlockTime { >::from_sql(bytes).map(|ts| Self(ts)) } } + +impl fmt::Display for BlockTime { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.0.as_microseconds_since_epoch()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use serde_json; + + #[test] + fn test_blockhash_deserialization() { + let json_data = "\"0x8186da3ec5590631ae7b9415ce58548cb98c7f1dc68c5ea1c519a3f0f6a25aac\""; + + let block_hash: BlockHash = + serde_json::from_str(json_data).expect("Deserialization failed"); + + let expected_bytes = + hex::decode("8186da3ec5590631ae7b9415ce58548cb98c7f1dc68c5ea1c519a3f0f6a25aac") + .expect("Hex decoding failed"); + + assert_eq!( + *block_hash.0, expected_bytes, + "BlockHash does not match expected bytes" + ); + } + + #[test] + fn test_block_ptr_ext_deserialization() { + // JSON data with a hex string for BlockNumber + let json_data = r#" + { + "hash": "0x8186da3ec5590631ae7b9415ce58548cb98c7f1dc68c5ea1c519a3f0f6a25aac", + "number": "0x2A", + "parentHash": "0xd71699894d637632dea4d425396086edf033c1ff72b13753e8c4e67700e3eb8e", + "timestamp": "0x673b284f" + } + "#; + + // Deserialize the JSON string into a ExtendedBlockPtr + let block_ptr_ext: ExtendedBlockPtr = + serde_json::from_str(json_data).expect("Deserialization failed"); + + // Verify the deserialized values + assert_eq!(block_ptr_ext.number, 42); // 0x2A in hex is 42 in decimal + assert_eq!( + block_ptr_ext.hash_hex(), + 
"8186da3ec5590631ae7b9415ce58548cb98c7f1dc68c5ea1c519a3f0f6a25aac" + ); + assert_eq!( + block_ptr_ext.parent_hash_hex(), + "d71699894d637632dea4d425396086edf033c1ff72b13753e8c4e67700e3eb8e" + ); + assert_eq!(block_ptr_ext.timestamp.0.as_secs_since_epoch(), 1731930191); + } + + #[test] + fn test_invalid_block_number_deserialization() { + let invalid_json_data = r#" + { + "hash": "0x8186da3ec5590631ae7b9415ce58548cb98c7f1dc68c5ea1c519a3f0f6a25aac", + "number": "invalid_hex_string", + "parentHash": "0xd71699894d637632dea4d425396086edf033c1ff72b13753e8c4e67700e3eb8e", + "timestamp": "123456789012345678901234567890" + } + "#; + + let result: Result = serde_json::from_str(invalid_json_data); + + assert!( + result.is_err(), + "Deserialization should have failed for invalid block number" + ); + } +} diff --git a/graph/src/components/metrics/registry.rs b/graph/src/components/metrics/registry.rs index 7fa5b903b05..e010d3a89fa 100644 --- a/graph/src/components/metrics/registry.rs +++ b/graph/src/components/metrics/registry.rs @@ -1,7 +1,9 @@ use std::collections::HashMap; use std::sync::{Arc, RwLock}; +use prometheus::IntGauge; use prometheus::{labels, Histogram, IntCounterVec}; +use slog::info; use crate::components::metrics::{counter_with_labels, gauge_with_labels}; use crate::prelude::Collector; @@ -120,52 +122,36 @@ impl MetricsRegistry { } } - pub fn register(&self, name: &str, c: Box) { - let err = match self.registry.register(c).err() { - None => { + /// Adds the metric to the registry. + /// + /// If the metric is a duplicate, it replaces a previous registration. 
+ fn register(&self, name: &str, collector: Box) + where + T: Collector + Clone + 'static, + { + let logger = self.logger.new(o!("metric_name" => name.to_string())); + let mut result = self.registry.register(collector.clone()); + + if matches!(result, Err(PrometheusError::AlreadyReg)) { + info!(logger, "Resolving duplicate metric registration"); + + // Since the current metric is a duplicate, + // we can use it to unregister the previous registration. + self.unregister(collector.clone()); + + result = self.registry.register(collector); + } + + match result { + Ok(()) => { + info!(logger, "Successfully registered a new metric"); self.registered_metrics.inc(); - return; } - Some(err) => { + Err(err) => { + error!(logger, "Failed to register a new metric"; "error" => format!("{err:#}")); self.register_errors.inc(); - err - } - }; - match err { - PrometheusError::AlreadyReg => { - error!( - self.logger, - "registering metric [{}] failed because it was already registered", name, - ); - } - PrometheusError::InconsistentCardinality { expect, got } => { - error!( - self.logger, - "registering metric [{}] failed due to inconsistent caridinality, expected = {} got = {}", - name, - expect, - got, - ); - } - PrometheusError::Msg(msg) => { - error!( - self.logger, - "registering metric [{}] failed because: {}", name, msg, - ); } - PrometheusError::Io(err) => { - error!( - self.logger, - "registering metric [{}] failed due to io error: {}", name, err, - ); - } - PrometheusError::Protobuf(err) => { - error!( - self.logger, - "registering metric [{}] failed due to protobuf error: {}", name, err - ); - } - }; + } } pub fn global_counter( @@ -510,6 +496,23 @@ impl MetricsRegistry { self.register(name, histograms.clone()); Ok(histograms) } + + pub fn new_int_gauge( + &self, + name: impl AsRef, + help: impl AsRef, + const_labels: impl IntoIterator, impl Into)>, + ) -> Result { + let opts = Opts::new(name.as_ref(), help.as_ref()).const_labels( + const_labels + .into_iter() + .map(|(a, 
b)| (a.into(), b.into())) + .collect(), + ); + let gauge = IntGauge::with_opts(opts)?; + self.register(name.as_ref(), Box::new(gauge.clone())); + Ok(gauge) + } } fn deployment_labels(subgraph: &str) -> HashMap { diff --git a/graph/src/components/metrics/subgraph.rs b/graph/src/components/metrics/subgraph.rs index d9b68da8631..6083ebb6677 100644 --- a/graph/src/components/metrics/subgraph.rs +++ b/graph/src/components/metrics/subgraph.rs @@ -1,21 +1,25 @@ -use prometheus::Counter; - -use crate::blockchain::block_stream::BlockStreamMetrics; -use crate::prelude::{Gauge, Histogram, HostMetrics}; use std::collections::HashMap; use std::sync::Arc; use std::time::Duration; +use prometheus::Counter; +use prometheus::IntGauge; + use super::stopwatch::StopwatchMetrics; use super::MetricsRegistry; +use crate::blockchain::block_stream::BlockStreamMetrics; +use crate::components::store::DeploymentLocator; +use crate::prelude::{Gauge, Histogram, HostMetrics}; pub struct SubgraphInstanceMetrics { pub block_trigger_count: Box, pub block_processing_duration: Box, pub block_ops_transaction_duration: Box, pub firehose_connection_errors: Counter, - pub stopwatch: StopwatchMetrics, + pub deployment_status: DeploymentStatusMetric, + pub deployment_synced: DeploymentSyncedMetric, + trigger_processing_duration: Box, blocks_processed_secs: Box, blocks_processed_count: Box, @@ -26,6 +30,7 @@ impl SubgraphInstanceMetrics { registry: Arc, subgraph_hash: &str, stopwatch: StopwatchMetrics, + deployment_status: DeploymentStatusMetric, ) -> Self { let block_trigger_count = registry .new_deployment_histogram( @@ -86,13 +91,18 @@ impl SubgraphInstanceMetrics { labels, ) .expect("failed to create blocks_processed_count counter"); + + let deployment_synced = DeploymentSyncedMetric::register(®istry, subgraph_hash); + Self { block_trigger_count, block_processing_duration, - trigger_processing_duration, block_ops_transaction_duration, firehose_connection_errors, stopwatch, + deployment_status, + 
deployment_synced, + trigger_processing_duration, blocks_processed_secs, blocks_processed_count, } @@ -114,6 +124,7 @@ impl SubgraphInstanceMetrics { registry.unregister(self.block_trigger_count.clone()); registry.unregister(self.trigger_processing_duration.clone()); registry.unregister(self.block_ops_transaction_duration.clone()); + registry.unregister(Box::new(self.deployment_synced.inner.clone())); } } @@ -154,3 +165,105 @@ pub struct RunnerMetrics { /// Sensors to measure the BlockStream metrics pub stream: Arc, } + +/// Reports the current indexing status of a deployment. +#[derive(Clone)] +pub struct DeploymentStatusMetric { + inner: IntGauge, +} + +impl DeploymentStatusMetric { + const STATUS_STARTING: i64 = 1; + const STATUS_RUNNING: i64 = 2; + const STATUS_STOPPED: i64 = 3; + const STATUS_FAILED: i64 = 4; + + /// Registers the metric. + pub fn register(registry: &MetricsRegistry, deployment: &DeploymentLocator) -> Self { + let deployment_status = registry + .new_int_gauge( + "deployment_status", + "Indicates the current indexing status of a deployment.\n\ + Possible values:\n\ + 1 - graph-node is preparing to start indexing;\n\ + 2 - deployment is being indexed;\n\ + 3 - indexing is stopped by request;\n\ + 4 - indexing failed;", + [("deployment", deployment.hash.as_str())], + ) + .expect("failed to register `deployment_status` gauge"); + + Self { + inner: deployment_status, + } + } + + /// Records that the graph-node is preparing to start indexing. + pub fn starting(&self) { + self.inner.set(Self::STATUS_STARTING); + } + + /// Records that the deployment is being indexed. + pub fn running(&self) { + self.inner.set(Self::STATUS_RUNNING); + } + + /// Records that the indexing is stopped by request. + pub fn stopped(&self) { + self.inner.set(Self::STATUS_STOPPED); + } + + /// Records that the indexing failed. 
+ pub fn failed(&self) { + self.inner.set(Self::STATUS_FAILED); + } +} + +/// Indicates whether a deployment has reached the chain head since it was deployed. +pub struct DeploymentSyncedMetric { + inner: IntGauge, + + // If, for some reason, a deployment reports that it is synced, and then reports that it is not + // synced during an execution, this prevents the metric from reverting to the not synced state. + previously_synced: std::sync::OnceLock<()>, +} + +impl DeploymentSyncedMetric { + const NOT_SYNCED: i64 = 0; + const SYNCED: i64 = 1; + + /// Registers the metric. + pub fn register(registry: &MetricsRegistry, deployment_hash: &str) -> Self { + let metric = registry + .new_int_gauge( + "deployment_synced", + "Indicates whether a deployment has reached the chain head since it was deployed.\n\ + Possible values:\n\ + 0 - deployment is not synced;\n\ + 1 - deployment is synced;", + [("deployment", deployment_hash)], + ) + .expect("failed to register `deployment_synced` gauge"); + + Self { + inner: metric, + previously_synced: std::sync::OnceLock::new(), + } + } + + /// Records the current sync status of the deployment. + /// Will ignore all values after the first `true` is received. 
+ pub fn record(&self, synced: bool) { + if self.previously_synced.get().is_some() { + return; + } + + if synced { + self.inner.set(Self::SYNCED); + let _ = self.previously_synced.set(()); + return; + } + + self.inner.set(Self::NOT_SYNCED); + } +} diff --git a/graph/src/components/store/entity_cache.rs b/graph/src/components/store/entity_cache.rs index a34767d54d2..062dd67dfc2 100644 --- a/graph/src/components/store/entity_cache.rs +++ b/graph/src/components/store/entity_cache.rs @@ -1,4 +1,4 @@ -use anyhow::anyhow; +use anyhow::{anyhow, bail}; use std::borrow::Borrow; use std::collections::HashMap; use std::fmt::{self, Debug}; @@ -17,6 +17,10 @@ use super::{BlockNumber, DerivedEntityQuery, LoadRelatedRequest, StoreError}; pub type EntityLfuCache = LfuCache>>; +// Number of VIDs that are reserved outside of the generated ones here. +// Currently none is used, but lets reserve a few more. +const RESERVED_VIDS: u32 = 100; + /// The scope in which the `EntityCache` should perform a `get` operation pub enum GetScope { /// Get from all previously stored entities in the store @@ -105,6 +109,10 @@ pub struct EntityCache { /// generated IDs, the `EntityCache` needs to be newly instantiated for /// each block seq: u32, + + // Sequence number of the next VID value for this block. The value written + // in the database consist of a block number and this SEQ number. + pub vid_seq: u32, } impl Debug for EntityCache { @@ -132,6 +140,7 @@ impl EntityCache { schema: store.input_schema(), store, seq: 0, + vid_seq: RESERVED_VIDS, } } @@ -152,6 +161,7 @@ impl EntityCache { schema: store.input_schema(), store, seq: 0, + vid_seq: RESERVED_VIDS, } } @@ -197,9 +207,14 @@ impl EntityCache { }; // Always test the cache consistency in debug mode. The test only - // makes sense when we were actually asked to read from the store + // makes sense when we were actually asked to read from the store. 
+ // We need to remove the VID as the one from the DB might come from + // a legacy subgraph that has VID autoincremented while this trait + // always creates it in a new style. debug_assert!(match scope { - GetScope::Store => entity == self.store.get(key).unwrap().map(Arc::new), + GetScope::Store => { + entity == self.store.get(key).unwrap().map(Arc::new) + } GetScope::InBlock => true, }); @@ -353,6 +368,7 @@ impl EntityCache { &mut self, key: EntityKey, entity: Entity, + block: BlockNumber, write_capacity_remaining: Option<&mut usize>, ) -> Result<(), anyhow::Error> { // check the validate for derived fields @@ -360,7 +376,6 @@ impl EntityCache { if let Some(write_capacity_remaining) = write_capacity_remaining { let weight = entity.weight(); - if !self.current.contains_key(&key) && weight > *write_capacity_remaining { return Err(anyhow!( "exceeded block write limit when writing entity `{}`", @@ -371,6 +386,21 @@ impl EntityCache { *write_capacity_remaining -= weight; } + // The next VID is based on a block number and a sequence within the block + let vid = ((block as i64) << 32) + self.vid_seq as i64; + self.vid_seq += 1; + let mut entity = entity; + let old_vid = entity.set_vid(vid).expect("the vid should be set"); + // Make sure that there was no VID previously set for this entity. 
+ if let Some(ovid) = old_vid { + bail!( + "VID: {} of entity: {} with ID: {} was already present when set in EntityCache", + ovid, + key.entity_type, + entity.id() + ); + } + self.entity_op(key.clone(), EntityOp::Update(entity)); // The updates we were given are not valid by themselves; force a @@ -507,7 +537,7 @@ impl EntityCache { // Entity was removed and then updated, so it will be overwritten (Some(current), EntityOp::Overwrite(data)) => { let data = Arc::new(data); - self.current.insert(key.clone(), Some(data.clone())); + self.current.insert(key.clone(), Some(data.cheap_clone())); if current != data { Some(Overwrite { key, diff --git a/graph/src/components/store/traits.rs b/graph/src/components/store/traits.rs index 0ac80902a66..2292a1f61f5 100644 --- a/graph/src/components/store/traits.rs +++ b/graph/src/components/store/traits.rs @@ -1,12 +1,13 @@ use std::collections::HashMap; +use std::ops::Range; use anyhow::Error; use async_trait::async_trait; use web3::types::{Address, H256}; use super::*; -use crate::blockchain::block_stream::FirehoseCursor; -use crate::blockchain::{BlockTime, ChainIdentifier}; +use crate::blockchain::block_stream::{EntitySourceOperation, FirehoseCursor}; +use crate::blockchain::{BlockTime, ChainIdentifier, ExtendedBlockPtr}; use crate::components::metrics::stopwatch::StopwatchMetrics; use crate::components::server::index_node::VersionInfo; use crate::components::subgraph::SubgraphVersionSwitchingMode; @@ -185,6 +186,11 @@ pub trait SubgraphStore: Send + Sync + 'static { manifest_idx_and_name: Arc>, ) -> Result, StoreError>; + async fn sourceable( + self: Arc, + deployment: DeploymentId, + ) -> Result, StoreError>; + /// Initiate a graceful shutdown of the writable that a previous call to /// `writable` might have started async fn stop_subgraph(&self, deployment: &DeploymentLocator) -> Result<(), StoreError>; @@ -287,6 +293,44 @@ impl DeploymentCursorTracker for Arc { } } +#[async_trait] +pub trait SourceableStore: Sync + Send + 
'static { + /// Returns all versions of entities of the given entity_type that were + /// changed in the given block_range. + fn get_range( + &self, + entity_types: Vec, + causality_region: CausalityRegion, + block_range: Range, + ) -> Result>, StoreError>; + + fn input_schema(&self) -> InputSchema; + + /// Get a pointer to the most recently processed block in the subgraph. + async fn block_ptr(&self) -> Result, StoreError>; +} + +// This silly impl is needed until https://github.com/rust-lang/rust/issues/65991 is stable. +#[async_trait] +impl SourceableStore for Arc { + fn get_range( + &self, + entity_types: Vec, + causality_region: CausalityRegion, + block_range: Range, + ) -> Result>, StoreError> { + (**self).get_range(entity_types, causality_region, block_range) + } + + fn input_schema(&self) -> InputSchema { + (**self).input_schema() + } + + async fn block_ptr(&self) -> Result, StoreError> { + (**self).block_ptr().await + } +} + /// A view of the store for indexing. All indexing-related operations need /// to go through this trait. Methods in this trait will never return a /// `StoreError::DatabaseUnavailable`. Instead, they will retry the @@ -478,6 +522,12 @@ pub trait ChainStore: Send + Sync + 'static { hashes: Vec, ) -> Result, Error>; + /// Returns the blocks present in the store for the given block numbers. + async fn block_ptrs_by_numbers( + self: Arc, + numbers: Vec, + ) -> Result>, Error>; + /// Get the `offset`th ancestor of `block_hash`, where offset=0 means the block matching /// `block_hash` and offset=1 means its parent. If `root` is passed, short-circuit upon finding /// a child of `root`. 
Returns None if unable to complete due to missing blocks in the chain diff --git a/graph/src/components/subgraph/instance.rs b/graph/src/components/subgraph/instance.rs index 5609b2ac8f4..11b473a878d 100644 --- a/graph/src/components/subgraph/instance.rs +++ b/graph/src/components/subgraph/instance.rs @@ -20,6 +20,7 @@ impl From<&DataSourceTemplate> for InstanceDSTemplate { match value { DataSourceTemplate::Onchain(ds) => Self::Onchain(ds.info()), DataSourceTemplate::Offchain(ds) => Self::Offchain(ds.clone()), + DataSourceTemplate::Subgraph(_) => todo!(), // TODO(krishna) } } } diff --git a/graph/src/components/subgraph/proof_of_indexing/online.rs b/graph/src/components/subgraph/proof_of_indexing/online.rs index caaa76f0a76..d47f08b0a8f 100644 --- a/graph/src/components/subgraph/proof_of_indexing/online.rs +++ b/graph/src/components/subgraph/proof_of_indexing/online.rs @@ -146,8 +146,8 @@ impl BlockEventStream { fn write(&mut self, event: &ProofOfIndexingEvent<'_>) { let children = &[ 1, // kvp -> v - 0, // PoICausalityRegion.blocks: Vec - self.block_index, // Vec -> [i] + 0, // PoICausalityRegion.blocks: Result> + self.block_index, // Result> -> [i] 0, // Block.events -> Vec self.vec_length, ]; @@ -242,6 +242,10 @@ impl ProofOfIndexing { pub fn take(self) -> HashMap { self.per_causality_region } + + pub fn get_block(&self) -> BlockNumber { + self.block_number + } } pub struct ProofOfIndexingFinisher { diff --git a/graph/src/data/store/mod.rs b/graph/src/data/store/mod.rs index 33d9286ceec..25c0d3e0813 100644 --- a/graph/src/data/store/mod.rs +++ b/graph/src/data/store/mod.rs @@ -3,7 +3,7 @@ use crate::{ derive::CacheWeight, prelude::{lazy_static, q, r, s, CacheWeight, QueryExecutionError}, runtime::gas::{Gas, GasSizeOf}, - schema::{EntityKey, EntityType}, + schema::{input::VID_FIELD, EntityKey, EntityType}, util::intern::{self, AtomPool}, util::intern::{Error as InternError, NullValue, Object}, }; @@ -735,19 +735,22 @@ where lazy_static! 
{ /// The name of the id attribute, `"id"` pub static ref ID: Word = Word::from("id"); + /// The name of the vid attribute, `"vid"` + pub static ref VID: Word = Word::from("vid"); } /// An entity is represented as a map of attribute names to values. -#[derive(Clone, CacheWeight, PartialEq, Eq, Serialize)] +#[derive(Clone, CacheWeight, Eq, Serialize)] pub struct Entity(Object); impl<'a> IntoIterator for &'a Entity { - type Item = (Word, Value); + type Item = (&'a str, &'a Value); - type IntoIter = intern::ObjectOwningIter; + type IntoIter = + std::iter::Filter, fn(&(&'a str, &'a Value)) -> bool>; fn into_iter(self) -> Self::IntoIter { - self.0.clone().into_iter() + (&self.0).into_iter().filter(|(k, _)| *k != VID_FIELD) } } @@ -872,22 +875,34 @@ impl Entity { } pub fn get(&self, key: &str) -> Option<&Value> { + // VID field is private and not visible outside + if key == VID_FIELD { + return None; + } self.0.get(key) } pub fn contains_key(&self, key: &str) -> bool { + // VID field is private and not visible outside + if key == VID_FIELD { + return false; + } self.0.contains_key(key) } // This collects the entity into an ordered vector so that it can be iterated deterministically. pub fn sorted(self) -> Vec<(Word, Value)> { - let mut v: Vec<_> = self.0.into_iter().map(|(k, v)| (k, v)).collect(); + let mut v: Vec<_> = self + .0 + .into_iter() + .filter(|(k, _)| !k.eq(VID_FIELD)) + .collect(); v.sort_by(|(k1, _), (k2, _)| k1.cmp(k2)); v } pub fn sorted_ref(&self) -> Vec<(&str, &Value)> { - let mut v: Vec<_> = self.0.iter().collect(); + let mut v: Vec<_> = self.0.iter().filter(|(k, _)| !k.eq(&VID_FIELD)).collect(); v.sort_by(|(k1, _), (k2, _)| k1.cmp(k2)); v } @@ -910,6 +925,21 @@ impl Entity { Id::try_from(self.get("id").unwrap().clone()).expect("the id is set to a valid value") } + /// Return the VID of this entity and if its missing or of a type different than + /// i64 it panics. 
+ pub fn vid(&self) -> i64 { + self.0 + .get(VID_FIELD) + .expect("the vid must be set") + .as_int8() + .expect("the vid must be set to a valid value") + } + + /// Sets the VID of the entity. The previous one is returned. + pub fn set_vid(&mut self, value: i64) -> Result, InternError> { + self.0.insert(VID_FIELD, value.into()) + } + /// Merges an entity update `update` into this entity. /// /// If a key exists in both entities, the value from `update` is chosen. @@ -1033,6 +1063,13 @@ impl Entity { } } +/// Checks equality of two entities while ignoring the VID fields +impl PartialEq for Entity { + fn eq(&self, other: &Self) -> bool { + self.0.eq_ignore_key(&other.0, VID_FIELD) + } +} + /// Convenience methods to modify individual attributes for tests. /// Production code should not use/need this. #[cfg(debug_assertions)] @@ -1052,6 +1089,14 @@ impl Entity { ) -> Result, InternError> { self.0.insert(name, value.into()) } + + /// Sets the VID if it's not already set. Should be used only for tests. + pub fn set_vid_if_empty(&mut self) { + let vid = self.0.get(VID_FIELD); + if vid.is_none() { + let _ = self.set_vid(100).expect("the vid should be set"); + } + } } impl<'a> From<&'a Entity> for Cow<'a, Entity> { @@ -1243,3 +1288,47 @@ fn fmt_debug() { let bi = Value::BigInt(scalar::BigInt::from(-17i32)); assert_eq!("BigInt(-17)", format!("{:?}", bi)); } + +#[test] +fn entity_hidden_vid() { + use crate::schema::InputSchema; + let subgraph_id = "oneInterfaceOneEntity"; + let document = "type Thing @entity {id: ID!, name: String!}"; + let schema = InputSchema::raw(document, subgraph_id); + + let entity = entity! { schema => id: "1", name: "test", vid: 3i64 }; + let debug_str = format!("{:?}", entity); + let entity_str = "Entity { id: String(\"1\"), name: String(\"test\"), vid: Int8(3) }"; + assert_eq!(debug_str, entity_str); + + // get returns nothing... 
+ assert_eq!(entity.get(VID_FIELD), None); + assert_eq!(entity.contains_key(VID_FIELD), false); + // ...while vid is present + assert_eq!(entity.vid(), 3i64); + + // into_iter() misses it too + let mut it = entity.into_iter(); + assert_eq!(Some(("id", &Value::String("1".to_string()))), it.next()); + assert_eq!( + Some(("name", &Value::String("test".to_string()))), + it.next() + ); + assert_eq!(None, it.next()); + + let mut entity2 = entity! { schema => id: "1", name: "test", vid: 5i64 }; + assert_eq!(entity2.vid(), 5i64); + // equal with different vid + assert_eq!(entity, entity2); + + entity2.remove(VID_FIELD); + // equal if one has no vid + assert_eq!(entity, entity2); + let debug_str2 = format!("{:?}", entity2); + let entity_str2 = "Entity { id: String(\"1\"), name: String(\"test\") }"; + assert_eq!(debug_str2, entity_str2); + + // set again + _ = entity2.set_vid(7i64); + assert_eq!(entity2.vid(), 7i64); +} diff --git a/graph/src/data/subgraph/api_version.rs b/graph/src/data/subgraph/api_version.rs index e626e9f1dbc..fbda95b2792 100644 --- a/graph/src/data/subgraph/api_version.rs +++ b/graph/src/data/subgraph/api_version.rs @@ -54,8 +54,14 @@ pub const SPEC_VERSION_1_1_0: Version = Version::new(1, 1, 0); // Enables eth call declarations and indexed arguments(topics) filtering in manifest pub const SPEC_VERSION_1_2_0: Version = Version::new(1, 2, 0); +// Enables subgraphs as datasource. +// Changes the way the VID field is generated. It used to be autoincrement. Now its +// based on block number and the order of the entities in a block. The latter +// represents the write order across all entity types in the subgraph. 
+pub const SPEC_VERSION_1_3_0: Version = Version::new(1, 3, 0); + // The latest spec version available -pub const LATEST_VERSION: &Version = &SPEC_VERSION_1_2_0; +pub const LATEST_VERSION: &Version = &SPEC_VERSION_1_3_0; pub const MIN_SPEC_VERSION: Version = Version::new(0, 0, 2); diff --git a/graph/src/data/subgraph/mod.rs b/graph/src/data/subgraph/mod.rs index df379845c00..d14b2c89b29 100644 --- a/graph/src/data/subgraph/mod.rs +++ b/graph/src/data/subgraph/mod.rs @@ -33,7 +33,7 @@ use web3::types::Address; use crate::{ bail, - blockchain::{BlockPtr, Blockchain, DataSource as _}, + blockchain::{BlockPtr, Blockchain}, components::{ link_resolver::LinkResolver, store::{StoreError, SubgraphStore}, @@ -140,6 +140,10 @@ impl DeploymentHash { link: format!("/ipfs/{}", self), } } + + pub fn to_bytes(&self) -> Vec { + self.0.as_bytes().to_vec() + } } impl Deref for DeploymentHash { @@ -573,7 +577,7 @@ pub struct BaseSubgraphManifest { #[derive(Debug, Deserialize)] #[serde(rename_all = "camelCase")] pub struct IndexerHints { - prune: Option, + pub prune: Option, } impl IndexerHints { @@ -668,6 +672,73 @@ pub type SubgraphManifest = pub struct UnvalidatedSubgraphManifest(SubgraphManifest); impl UnvalidatedSubgraphManifest { + fn validate_subgraph_datasources( + data_sources: &[DataSource], + spec_version: &Version, + ) -> Vec { + let mut errors = Vec::new(); + + // Check spec version support for subgraph datasources + if *spec_version < SPEC_VERSION_1_3_0 { + if data_sources + .iter() + .any(|ds| matches!(ds, DataSource::Subgraph(_))) + { + errors.push(SubgraphManifestValidationError::DataSourceValidation( + "subgraph".to_string(), + anyhow!( + "Subgraph datasources are not supported prior to spec version {}", + SPEC_VERSION_1_3_0 + ), + )); + return errors; + } + } + + let subgraph_ds_count = data_sources + .iter() + .filter(|ds| matches!(ds, DataSource::Subgraph(_))) + .count(); + + if subgraph_ds_count > 5 { + 
errors.push(SubgraphManifestValidationError::DataSourceValidation( + "subgraph".to_string(), + anyhow!("Cannot have more than 5 subgraph datasources"), + )); + } + + let has_subgraph_ds = subgraph_ds_count > 0; + let has_onchain_ds = data_sources + .iter() + .any(|d| matches!(d, DataSource::Onchain(_))); + + if has_subgraph_ds && has_onchain_ds { + errors.push(SubgraphManifestValidationError::DataSourceValidation( + "subgraph".to_string(), + anyhow!("Subgraph datasources cannot be used alongside onchain datasources"), + )); + } + + // Check for duplicate source subgraphs + let mut seen_sources = std::collections::HashSet::new(); + for ds in data_sources.iter() { + if let DataSource::Subgraph(ds) = ds { + let source_id = ds.source.address(); + if !seen_sources.insert(source_id.clone()) { + errors.push(SubgraphManifestValidationError::DataSourceValidation( + "subgraph".to_string(), + anyhow!( + "Multiple subgraph datasources cannot use the same source subgraph {}", + source_id + ), + )); + } + } + } + + errors + } + /// Entry point for resolving a subgraph definition. 
/// Right now the only supported links are of the form: /// `/ipfs/QmUmg7BZC1YP1ca66rRtWKxpXp77WgVHrnv263JtDuvs2k` @@ -713,7 +784,7 @@ impl UnvalidatedSubgraphManifest { .0 .data_sources .iter() - .filter_map(|d| Some(d.as_onchain()?.network()?.to_string())) + .filter_map(|d| Some(d.network()?.to_string())) .collect::>(); networks.sort(); networks.dedup(); @@ -738,6 +809,12 @@ impl UnvalidatedSubgraphManifest { } } + // Validate subgraph datasource constraints + errors.extend(Self::validate_subgraph_datasources( + &self.0.data_sources, + &self.0.spec_version, + )); + match errors.is_empty() { true => Ok(self.0), false => Err(errors), @@ -759,11 +836,9 @@ impl SubgraphManifest { max_spec_version: semver::Version, ) -> Result { let unresolved = UnresolvedSubgraphManifest::parse(id, raw)?; - let resolved = unresolved .resolve(resolver, logger, max_spec_version) .await?; - Ok(resolved) } @@ -771,14 +846,14 @@ impl SubgraphManifest { // Assume the manifest has been validated, ensuring network names are homogenous self.data_sources .iter() - .find_map(|d| Some(d.as_onchain()?.network()?.to_string())) + .find_map(|d| Some(d.network()?.to_string())) .expect("Validated manifest does not have a network defined on any datasource") } pub fn start_blocks(&self) -> Vec { self.data_sources .iter() - .filter_map(|d| Some(d.as_onchain()?.start_block())) + .filter_map(|d| d.start_block()) .collect() } @@ -1003,6 +1078,17 @@ impl UnresolvedSubgraphManifest { ); } + // Validate subgraph datasource constraints + if let Some(error) = UnvalidatedSubgraphManifest::::validate_subgraph_datasources( + &data_sources, + &spec_version, + ) + .into_iter() + .next() + { + return Err(anyhow::Error::from(error).into()); + } + // Check the min_spec_version of each data source against the spec version of the subgraph let min_spec_version_mismatch = data_sources .iter() diff --git a/graph/src/data_source/common.rs b/graph/src/data_source/common.rs new file mode 100644 index 00000000000..a70f0ab8e17 
--- /dev/null +++ b/graph/src/data_source/common.rs @@ -0,0 +1,750 @@ +use crate::blockchain::block_stream::EntitySourceOperation; +use crate::prelude::{BlockPtr, Value}; +use crate::{components::link_resolver::LinkResolver, data::value::Word, prelude::Link}; +use anyhow::{anyhow, Context, Error}; +use ethabi::{Address, Contract, Function, LogParam, ParamType, Token}; +use graph_derive::CheapClone; +use lazy_static::lazy_static; +use num_bigint::Sign; +use regex::Regex; +use serde::de; +use serde::Deserialize; +use slog::Logger; +use std::{str::FromStr, sync::Arc}; +use web3::types::{Log, H160}; + +#[derive(Clone, Debug, PartialEq)] +pub struct MappingABI { + pub name: String, + pub contract: Contract, +} + +impl MappingABI { + pub fn function( + &self, + contract_name: &str, + name: &str, + signature: Option<&str>, + ) -> Result<&Function, Error> { + let contract = &self.contract; + let function = match signature { + // Behavior for apiVersion < 0.0.4: look up function by name; for overloaded + // functions this always picks the same overloaded variant, which is incorrect + // and may lead to encoding/decoding errors + None => contract.function(name).with_context(|| { + format!( + "Unknown function \"{}::{}\" called from WASM runtime", + contract_name, name + ) + })?, + + // Behavior for apiVersion >= 0.0.04: look up function by signature of + // the form `functionName(uint256,string) returns (bytes32,string)`; this + // correctly picks the correct variant of an overloaded function + Some(ref signature) => contract + .functions_by_name(name) + .with_context(|| { + format!( + "Unknown function \"{}::{}\" called from WASM runtime", + contract_name, name + ) + })? 
+ .iter() + .find(|f| signature == &f.signature()) + .with_context(|| { + format!( + "Unknown function \"{}::{}\" with signature `{}` \ + called from WASM runtime", + contract_name, name, signature, + ) + })?, + }; + Ok(function) + } +} + +#[derive(Clone, Debug, Hash, Eq, PartialEq, Deserialize)] +pub struct UnresolvedMappingABI { + pub name: String, + pub file: Link, +} + +impl UnresolvedMappingABI { + pub async fn resolve( + self, + resolver: &Arc, + logger: &Logger, + ) -> Result { + let contract_bytes = resolver.cat(logger, &self.file).await.with_context(|| { + format!( + "failed to resolve ABI {} from {}", + self.name, self.file.link + ) + })?; + let contract = Contract::load(&*contract_bytes)?; + Ok(MappingABI { + name: self.name, + contract, + }) + } +} + +/// Internal representation of declared calls. In the manifest that's +/// written as part of an event handler as +/// ```yaml +/// calls: +/// - myCall1: Contract[address].function(arg1, arg2, ...) +/// - .. +/// ``` +/// +/// The `address` and `arg` fields can be either `event.address` or +/// `event.params.`. 
Each entry under `calls` gets turned into a +/// `CallDcl` +#[derive(Clone, CheapClone, Debug, Default, Hash, Eq, PartialEq)] +pub struct CallDecls { + pub decls: Arc>, + readonly: (), +} + +/// A single call declaration, like `myCall1: +/// Contract[address].function(arg1, arg2, ...)` +#[derive(Clone, Debug, Hash, Eq, PartialEq)] +pub struct CallDecl { + /// A user-defined label + pub label: String, + /// The call expression + pub expr: CallExpr, + readonly: (), +} + +impl CallDecl { + pub fn validate_args(&self) -> Result<(), Error> { + self.expr.validate_args() + } + + pub fn address_for_log(&self, log: &Log, params: &[LogParam]) -> Result { + let address = match &self.expr.address { + CallArg::HexAddress(address) => *address, + CallArg::Ethereum(arg) => match arg { + EthereumArg::Address => log.address, + EthereumArg::Param(name) => { + let value = params + .iter() + .find(|param| ¶m.name == name.as_str()) + .ok_or_else(|| anyhow!("unknown param {name}"))? + .value + .clone(); + value + .into_address() + .ok_or_else(|| anyhow!("param {name} is not an address"))? + } + }, + CallArg::Subgraph(_) => { + return Err(anyhow!( + "Subgraph params are not supported for when declaring calls for event handlers" + )) + } + }; + Ok(address) + } + + pub fn args_for_log(&self, log: &Log, params: &[LogParam]) -> Result, Error> { + self.expr + .args + .iter() + .map(|arg| match arg { + CallArg::HexAddress(address) => Ok(Token::Address(*address)), + CallArg::Ethereum(arg) => match arg { + EthereumArg::Address => Ok(Token::Address(log.address)), + EthereumArg::Param(name) => { + let value = params + .iter() + .find(|param| ¶m.name == name.as_str()) + .ok_or_else(|| anyhow!("unknown param {name}"))? 
+ .value + .clone(); + Ok(value) + } + }, + CallArg::Subgraph(_) => Err(anyhow!( + "Subgraph params are not supported for when declaring calls for event handlers" + )), + }) + .collect() + } + + pub fn get_function(&self, mapping: &dyn FindMappingABI) -> Result { + let contract_name = self.expr.abi.to_string(); + let function_name = self.expr.func.as_str(); + let abi = mapping.find_abi(&contract_name)?; + + // TODO: Handle overloaded functions + // Behavior for apiVersion < 0.0.4: look up function by name; for overloaded + // functions this always picks the same overloaded variant, which is incorrect + // and may lead to encoding/decoding errors + abi.contract + .function(function_name) + .cloned() + .with_context(|| { + format!( + "Unknown function \"{}::{}\" called from WASM runtime", + contract_name, function_name + ) + }) + } + + pub fn address_for_entity_handler( + &self, + entity: &EntitySourceOperation, + ) -> Result { + match &self.expr.address { + // Static hex address - just return it directly + CallArg::HexAddress(address) => Ok(*address), + + // Ethereum params not allowed here + CallArg::Ethereum(_) => Err(anyhow!( + "Ethereum params are not supported for entity handler calls" + )), + + // Look up address from entity parameter + CallArg::Subgraph(SubgraphArg::EntityParam(name)) => { + // Get the value for this parameter + let value = entity + .entity + .get(name.as_str()) + .ok_or_else(|| anyhow!("entity missing required param '{name}'"))?; + + // Make sure it's a bytes value and convert to address + match value { + Value::Bytes(bytes) => { + let address = H160::from_slice(bytes.as_slice()); + Ok(address) + } + _ => Err(anyhow!("param '{name}' must be an address")), + } + } + } + } + + /// Processes arguments for an entity handler, converting them to the expected token types. + /// Returns an error if argument count mismatches or if conversion fails. 
+ pub fn args_for_entity_handler( + &self, + entity: &EntitySourceOperation, + param_types: Vec, + ) -> Result, Error> { + self.validate_entity_handler_args(¶m_types)?; + + self.expr + .args + .iter() + .zip(param_types.into_iter()) + .map(|(arg, expected_type)| { + self.process_entity_handler_arg(arg, &expected_type, entity) + }) + .collect() + } + + /// Validates that the number of provided arguments matches the expected parameter types. + fn validate_entity_handler_args(&self, param_types: &[ParamType]) -> Result<(), Error> { + if self.expr.args.len() != param_types.len() { + return Err(anyhow!( + "mismatched number of arguments: expected {}, got {}", + param_types.len(), + self.expr.args.len() + )); + } + Ok(()) + } + + /// Processes a single entity handler argument based on its type (HexAddress, Ethereum, or Subgraph). + /// Returns error for unsupported Ethereum params. + fn process_entity_handler_arg( + &self, + arg: &CallArg, + expected_type: &ParamType, + entity: &EntitySourceOperation, + ) -> Result { + match arg { + CallArg::HexAddress(address) => self.process_hex_address(*address, expected_type), + CallArg::Ethereum(_) => Err(anyhow!( + "Ethereum params are not supported for entity handler calls" + )), + CallArg::Subgraph(SubgraphArg::EntityParam(name)) => { + self.process_entity_param(name, expected_type, entity) + } + } + } + + /// Converts a hex address to a token, ensuring it matches the expected parameter type. + fn process_hex_address( + &self, + address: H160, + expected_type: &ParamType, + ) -> Result { + match expected_type { + ParamType::Address => Ok(Token::Address(address)), + _ => Err(anyhow!( + "type mismatch: hex address provided for non-address parameter" + )), + } + } + + /// Retrieves and processes an entity parameter, converting it to the expected token type. 
+ fn process_entity_param( + &self, + name: &str, + expected_type: &ParamType, + entity: &EntitySourceOperation, + ) -> Result { + let value = entity + .entity + .get(name) + .ok_or_else(|| anyhow!("entity missing required param '{name}'"))?; + + self.convert_entity_value_to_token(value, expected_type, name) + } + + /// Converts a `Value` to the appropriate `Token` type based on the expected parameter type. + /// Handles various type conversions including primitives, bytes, and arrays. + fn convert_entity_value_to_token( + &self, + value: &Value, + expected_type: &ParamType, + param_name: &str, + ) -> Result { + match (expected_type, value) { + (ParamType::Address, Value::Bytes(b)) => { + Ok(Token::Address(H160::from_slice(b.as_slice()))) + } + (ParamType::Bytes, Value::Bytes(b)) => Ok(Token::Bytes(b.as_ref().to_vec())), + (ParamType::FixedBytes(size), Value::Bytes(b)) if b.len() == *size => { + Ok(Token::FixedBytes(b.as_ref().to_vec())) + } + (ParamType::String, Value::String(s)) => Ok(Token::String(s.to_string())), + (ParamType::Bool, Value::Bool(b)) => Ok(Token::Bool(*b)), + (ParamType::Int(_), Value::Int(i)) => Ok(Token::Int((*i).into())), + (ParamType::Int(_), Value::Int8(i)) => Ok(Token::Int((*i).into())), + (ParamType::Int(_), Value::BigInt(i)) => Ok(Token::Int(i.to_signed_u256())), + (ParamType::Uint(_), Value::Int(i)) if *i >= 0 => Ok(Token::Uint((*i).into())), + (ParamType::Uint(_), Value::BigInt(i)) if i.sign() == Sign::Plus => { + Ok(Token::Uint(i.to_unsigned_u256())) + } + (ParamType::Array(inner_type), Value::List(values)) => { + self.process_entity_array_values(values, inner_type.as_ref(), param_name) + } + _ => Err(anyhow!( + "type mismatch for param '{param_name}': cannot convert {:?} to {:?}", + value, + expected_type + )), + } + } + + fn process_entity_array_values( + &self, + values: &[Value], + inner_type: &ParamType, + param_name: &str, + ) -> Result { + let tokens: Result, Error> = values + .iter() + .enumerate() + .map(|(idx, v)| { + 
self.convert_entity_value_to_token(v, inner_type, &format!("{param_name}[{idx}]")) + }) + .collect(); + Ok(Token::Array(tokens?)) + } +} + +impl<'de> de::Deserialize<'de> for CallDecls { + fn deserialize(deserializer: D) -> Result + where + D: de::Deserializer<'de>, + { + let decls: std::collections::HashMap = + de::Deserialize::deserialize(deserializer)?; + let decls = decls + .into_iter() + .map(|(name, expr)| { + expr.parse::().map(|expr| CallDecl { + label: name, + expr, + readonly: (), + }) + }) + .collect::>() + .map(|decls| Arc::new(decls)) + .map_err(de::Error::custom)?; + Ok(CallDecls { + decls, + readonly: (), + }) + } +} + +#[derive(Clone, Debug, Hash, Eq, PartialEq)] +pub struct CallExpr { + pub abi: Word, + pub address: CallArg, + pub func: Word, + pub args: Vec, + readonly: (), +} + +impl CallExpr { + fn validate_args(&self) -> Result<(), anyhow::Error> { + // Consider address along with args for checking Ethereum/Subgraph mixing + let has_ethereum = matches!(self.address, CallArg::Ethereum(_)) + || self + .args + .iter() + .any(|arg| matches!(arg, CallArg::Ethereum(_))); + + let has_subgraph = matches!(self.address, CallArg::Subgraph(_)) + || self + .args + .iter() + .any(|arg| matches!(arg, CallArg::Subgraph(_))); + + if has_ethereum && has_subgraph { + return Err(anyhow!( + "Cannot mix Ethereum and Subgraph args in the same call expression" + )); + } + + Ok(()) + } +} +/// Parse expressions of the form `Contract[address].function(arg1, arg2, +/// ...)` where the `address` and the args are either `event.address` or +/// `event.params.`. +/// +/// The parser is pretty awful as it generates error messages that aren't +/// very helpful. We should replace all this with a real parser, most likely +/// `combine` which is what `graphql_parser` uses +impl FromStr for CallExpr { + type Err = anyhow::Error; + + fn from_str(s: &str) -> Result { + lazy_static! { + static ref RE: Regex = Regex::new( + r"(?x) + (?P[a-zA-Z0-9_]+)\[ + (?P
[^]]+)\] + \. + (?P[a-zA-Z0-9_]+)\( + (?P[^)]*) + \)" + ) + .unwrap(); + } + let x = RE + .captures(s) + .ok_or_else(|| anyhow!("invalid call expression `{s}`"))?; + let abi = Word::from(x.name("abi").unwrap().as_str()); + let address = x.name("address").unwrap().as_str().parse()?; + let func = Word::from(x.name("func").unwrap().as_str()); + let args: Vec = x + .name("args") + .unwrap() + .as_str() + .split(',') + .filter(|s| !s.is_empty()) + .map(|s| s.trim().parse::()) + .collect::>()?; + + let call_expr = CallExpr { + abi, + address, + func, + args, + readonly: (), + }; + + // Validate the arguments after constructing the CallExpr + call_expr.validate_args()?; + + Ok(call_expr) + } +} + +#[derive(Clone, Debug, Hash, Eq, PartialEq)] +pub enum CallArg { + // Hard-coded hex address + HexAddress(Address), + // Ethereum-specific variants + Ethereum(EthereumArg), + // Subgraph datasource specific variants + Subgraph(SubgraphArg), +} + +#[derive(Clone, Debug, Hash, Eq, PartialEq)] +pub enum EthereumArg { + Address, + Param(Word), +} + +#[derive(Clone, Debug, Hash, Eq, PartialEq)] +pub enum SubgraphArg { + EntityParam(Word), +} + +lazy_static! 
{ + // Matches a 40-character hexadecimal string prefixed with '0x', typical for Ethereum addresses + static ref ADDR_RE: Regex = Regex::new(r"^0x[0-9a-fA-F]{40}$").unwrap(); +} + +impl FromStr for CallArg { + type Err = anyhow::Error; + + fn from_str(s: &str) -> Result { + if ADDR_RE.is_match(s) { + if let Ok(parsed_address) = Address::from_str(s) { + return Ok(CallArg::HexAddress(parsed_address)); + } + } + + let mut parts = s.split('.'); + match (parts.next(), parts.next(), parts.next()) { + (Some("event"), Some("address"), None) => Ok(CallArg::Ethereum(EthereumArg::Address)), + (Some("event"), Some("params"), Some(param)) => { + Ok(CallArg::Ethereum(EthereumArg::Param(Word::from(param)))) + } + (Some("entity"), Some(param), None) => Ok(CallArg::Subgraph(SubgraphArg::EntityParam( + Word::from(param), + ))), + _ => Err(anyhow!("invalid call argument `{}`", s)), + } + } +} + +pub trait FindMappingABI { + fn find_abi(&self, abi_name: &str) -> Result, Error>; +} + +#[derive(Clone, Debug, PartialEq)] +pub struct DeclaredCall { + /// The user-supplied label from the manifest + label: String, + contract_name: String, + address: Address, + function: Function, + args: Vec, +} + +impl DeclaredCall { + pub fn from_log_trigger( + mapping: &dyn FindMappingABI, + call_decls: &CallDecls, + log: &Log, + params: &[LogParam], + ) -> Result, anyhow::Error> { + Self::create_calls(mapping, call_decls, |decl, _| { + Ok(( + decl.address_for_log(log, params)?, + decl.args_for_log(log, params)?, + )) + }) + } + + pub fn from_entity_trigger( + mapping: &dyn FindMappingABI, + call_decls: &CallDecls, + entity: &EntitySourceOperation, + ) -> Result, anyhow::Error> { + Self::create_calls(mapping, call_decls, |decl, function| { + let param_types = function + .inputs + .iter() + .map(|param| param.kind.clone()) + .collect::>(); + + Ok(( + decl.address_for_entity_handler(entity)?, + decl.args_for_entity_handler(entity, param_types) + .context(format!( + "Failed to parse arguments for call to 
function \"{}\" of contract \"{}\"", + decl.expr.func.as_str(), + decl.expr.abi.to_string() + ))?, + )) + }) + } + + fn create_calls( + mapping: &dyn FindMappingABI, + call_decls: &CallDecls, + get_address_and_args: F, + ) -> Result, anyhow::Error> + where + F: Fn(&CallDecl, &Function) -> Result<(Address, Vec), anyhow::Error>, + { + let mut calls = Vec::new(); + for decl in call_decls.decls.iter() { + let contract_name = decl.expr.abi.to_string(); + let function = decl.get_function(mapping)?; + let (address, args) = get_address_and_args(decl, &function)?; + + calls.push(DeclaredCall { + label: decl.label.clone(), + contract_name, + address, + function: function.clone(), + args, + }); + } + Ok(calls) + } + + pub fn as_eth_call(self, block_ptr: BlockPtr, gas: Option) -> (ContractCall, String) { + ( + ContractCall { + contract_name: self.contract_name, + address: self.address, + block_ptr, + function: self.function, + args: self.args, + gas, + }, + self.label, + ) + } +} +#[derive(Clone, Debug)] +pub struct ContractCall { + pub contract_name: String, + pub address: Address, + pub block_ptr: BlockPtr, + pub function: Function, + pub args: Vec, + pub gas: Option, +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_ethereum_call_expr() { + let expr: CallExpr = "ERC20[event.address].balanceOf(event.params.token)" + .parse() + .unwrap(); + assert_eq!(expr.abi, "ERC20"); + assert_eq!(expr.address, CallArg::Ethereum(EthereumArg::Address)); + assert_eq!(expr.func, "balanceOf"); + assert_eq!( + expr.args, + vec![CallArg::Ethereum(EthereumArg::Param("token".into()))] + ); + + let expr: CallExpr = + "Pool[event.params.pool].fees(event.params.token0, event.params.token1)" + .parse() + .unwrap(); + assert_eq!(expr.abi, "Pool"); + assert_eq!( + expr.address, + CallArg::Ethereum(EthereumArg::Param("pool".into())) + ); + assert_eq!(expr.func, "fees"); + assert_eq!( + expr.args, + vec![ + CallArg::Ethereum(EthereumArg::Param("token0".into())), + 
CallArg::Ethereum(EthereumArg::Param("token1".into())) + ] + ); + } + + #[test] + fn test_subgraph_call_expr() { + let expr: CallExpr = "Token[entity.id].symbol()".parse().unwrap(); + assert_eq!(expr.abi, "Token"); + assert_eq!( + expr.address, + CallArg::Subgraph(SubgraphArg::EntityParam("id".into())) + ); + assert_eq!(expr.func, "symbol"); + assert_eq!(expr.args, vec![]); + + let expr: CallExpr = "Pair[entity.pair].getReserves(entity.token0)" + .parse() + .unwrap(); + assert_eq!(expr.abi, "Pair"); + assert_eq!( + expr.address, + CallArg::Subgraph(SubgraphArg::EntityParam("pair".into())) + ); + assert_eq!(expr.func, "getReserves"); + assert_eq!( + expr.args, + vec![CallArg::Subgraph(SubgraphArg::EntityParam("token0".into()))] + ); + } + + #[test] + fn test_hex_address_call_expr() { + let addr = "0xDeaDbeefdEAdbeefdEadbEEFdeadbeEFdEaDbeeF"; + let hex_address = CallArg::HexAddress(web3::types::H160::from_str(addr).unwrap()); + + // Test HexAddress in address position + let expr: CallExpr = format!("Pool[{}].growth()", addr).parse().unwrap(); + assert_eq!(expr.abi, "Pool"); + assert_eq!(expr.address, hex_address.clone()); + assert_eq!(expr.func, "growth"); + assert_eq!(expr.args, vec![]); + + // Test HexAddress in argument position + let expr: CallExpr = format!("Pool[event.address].approve({}, event.params.amount)", addr) + .parse() + .unwrap(); + assert_eq!(expr.abi, "Pool"); + assert_eq!(expr.address, CallArg::Ethereum(EthereumArg::Address)); + assert_eq!(expr.func, "approve"); + assert_eq!(expr.args.len(), 2); + assert_eq!(expr.args[0], hex_address); + } + + #[test] + fn test_invalid_call_args() { + // Invalid hex address + assert!("Pool[0xinvalid].test()".parse::().is_err()); + + // Invalid event path + assert!("Pool[event.invalid].test()".parse::().is_err()); + + // Invalid entity path + assert!("Pool[entity].test()".parse::().is_err()); + + // Empty address + assert!("Pool[].test()".parse::().is_err()); + + // Invalid parameter format + 
assert!("Pool[event.params].test()".parse::().is_err()); + } + + #[test] + fn test_from_str() { + // Test valid hex address + let addr = "0xDeaDbeefdEAdbeefdEadbEEFdeadbeEFdEaDbeeF"; + let arg = CallArg::from_str(addr).unwrap(); + assert!(matches!(arg, CallArg::HexAddress(_))); + + // Test Ethereum Address + let arg = CallArg::from_str("event.address").unwrap(); + assert!(matches!(arg, CallArg::Ethereum(EthereumArg::Address))); + + // Test Ethereum Param + let arg = CallArg::from_str("event.params.token").unwrap(); + assert!(matches!(arg, CallArg::Ethereum(EthereumArg::Param(_)))); + + // Test Subgraph EntityParam + let arg = CallArg::from_str("entity.token").unwrap(); + assert!(matches!( + arg, + CallArg::Subgraph(SubgraphArg::EntityParam(_)) + )); + } +} diff --git a/graph/src/data_source/mod.rs b/graph/src/data_source/mod.rs index a38148b25fe..4c56e99ea9b 100644 --- a/graph/src/data_source/mod.rs +++ b/graph/src/data_source/mod.rs @@ -1,6 +1,9 @@ pub mod causality_region; +pub mod common; pub mod offchain; +pub mod subgraph; +pub use self::DataSource as DataSourceEnum; pub use causality_region::CausalityRegion; #[cfg(test)] @@ -16,7 +19,7 @@ use crate::{ link_resolver::LinkResolver, store::{BlockNumber, StoredDynamicDataSource}, }, - data_source::offchain::OFFCHAIN_KINDS, + data_source::{offchain::OFFCHAIN_KINDS, subgraph::SUBGRAPH_DS_KIND}, prelude::{CheapClone as _, DataSourceContext}, schema::{EntityType, InputSchema}, }; @@ -35,6 +38,7 @@ use thiserror::Error; pub enum DataSource { Onchain(C::DataSource), Offchain(offchain::DataSource), + Subgraph(subgraph::DataSource), } #[derive(Error, Debug)] @@ -89,6 +93,23 @@ impl DataSource { match self { Self::Onchain(ds) => Some(ds), Self::Offchain(_) => None, + Self::Subgraph(_) => None, + } + } + + pub fn as_subgraph(&self) -> Option<&subgraph::DataSource> { + match self { + Self::Onchain(_) => None, + Self::Offchain(_) => None, + Self::Subgraph(ds) => Some(ds), + } + } + + pub fn is_chain_based(&self) -> bool { + 
match self { + Self::Onchain(_) => true, + Self::Offchain(_) => false, + Self::Subgraph(_) => true, } } @@ -96,6 +117,23 @@ impl DataSource { match self { Self::Onchain(_) => None, Self::Offchain(ds) => Some(ds), + Self::Subgraph(_) => None, + } + } + + pub fn network(&self) -> Option<&str> { + match self { + DataSourceEnum::Onchain(ds) => ds.network(), + DataSourceEnum::Offchain(_) => None, + DataSourceEnum::Subgraph(ds) => ds.network(), + } + } + + pub fn start_block(&self) -> Option { + match self { + DataSourceEnum::Onchain(ds) => Some(ds.start_block()), + DataSourceEnum::Offchain(_) => None, + DataSourceEnum::Subgraph(ds) => Some(ds.source.start_block), } } @@ -111,6 +149,7 @@ impl DataSource { match self { Self::Onchain(ds) => ds.address().map(ToOwned::to_owned), Self::Offchain(ds) => ds.address(), + Self::Subgraph(ds) => ds.address(), } } @@ -118,6 +157,7 @@ impl DataSource { match self { Self::Onchain(ds) => ds.name(), Self::Offchain(ds) => &ds.name, + Self::Subgraph(ds) => &ds.name, } } @@ -125,6 +165,7 @@ impl DataSource { match self { Self::Onchain(ds) => ds.kind().to_owned(), Self::Offchain(ds) => ds.kind.to_string(), + Self::Subgraph(ds) => ds.kind.clone(), } } @@ -132,6 +173,7 @@ impl DataSource { match self { Self::Onchain(ds) => ds.min_spec_version(), Self::Offchain(ds) => ds.min_spec_version(), + Self::Subgraph(ds) => ds.min_spec_version(), } } @@ -139,6 +181,7 @@ impl DataSource { match self { Self::Onchain(ds) => ds.end_block(), Self::Offchain(_) => None, + Self::Subgraph(_) => None, } } @@ -146,6 +189,7 @@ impl DataSource { match self { Self::Onchain(ds) => ds.creation_block(), Self::Offchain(ds) => ds.creation_block, + Self::Subgraph(ds) => ds.creation_block, } } @@ -153,6 +197,7 @@ impl DataSource { match self { Self::Onchain(ds) => ds.context(), Self::Offchain(ds) => ds.context.clone(), + Self::Subgraph(ds) => ds.context.clone(), } } @@ -160,6 +205,7 @@ impl DataSource { match self { Self::Onchain(ds) => ds.api_version(), Self::Offchain(ds) 
=> ds.mapping.api_version.clone(), + Self::Subgraph(ds) => ds.mapping.api_version.clone(), } } @@ -167,6 +213,7 @@ impl DataSource { match self { Self::Onchain(ds) => ds.runtime(), Self::Offchain(ds) => Some(ds.mapping.runtime.cheap_clone()), + Self::Subgraph(ds) => Some(ds.mapping.runtime.cheap_clone()), } } @@ -176,6 +223,7 @@ impl DataSource { // been enforced. Self::Onchain(_) => EntityTypeAccess::Any, Self::Offchain(ds) => EntityTypeAccess::Restriced(ds.mapping.entities.clone()), + Self::Subgraph(_) => EntityTypeAccess::Any, } } @@ -183,6 +231,7 @@ impl DataSource { match self { Self::Onchain(ds) => ds.handler_kinds(), Self::Offchain(ds) => vec![ds.handler_kind()].into_iter().collect(), + Self::Subgraph(ds) => vec![ds.handler_kind()].into_iter().collect(), } } @@ -190,6 +239,7 @@ impl DataSource { match self { Self::Onchain(ds) => ds.has_declared_calls(), Self::Offchain(_) => false, + Self::Subgraph(_) => false, } } @@ -207,8 +257,15 @@ impl DataSource { (Self::Offchain(ds), TriggerData::Offchain(trigger)) => { Ok(ds.match_and_decode(trigger)) } + (Self::Subgraph(ds), TriggerData::Subgraph(trigger)) => { + ds.match_and_decode(block, trigger) + } (Self::Onchain(_), TriggerData::Offchain(_)) - | (Self::Offchain(_), TriggerData::Onchain(_)) => Ok(None), + | (Self::Offchain(_), TriggerData::Onchain(_)) + | (Self::Onchain(_), TriggerData::Subgraph(_)) + | (Self::Offchain(_), TriggerData::Subgraph(_)) + | (Self::Subgraph(_), TriggerData::Onchain(_)) + | (Self::Subgraph(_), TriggerData::Offchain(_)) => Ok(None), } } @@ -224,6 +281,7 @@ impl DataSource { match self { Self::Onchain(ds) => ds.as_stored_dynamic_data_source(), Self::Offchain(ds) => ds.as_stored_dynamic_data_source(), + Self::Subgraph(_) => todo!(), // TODO(krishna) } } @@ -240,6 +298,7 @@ impl DataSource { offchain::DataSource::from_stored_dynamic_data_source(template, stored) .map(DataSource::Offchain) } + DataSourceTemplate::Subgraph(_) => todo!(), // TODO(krishna) } } @@ -247,6 +306,7 @@ impl 
DataSource { match self { Self::Onchain(ds) => ds.validate(spec_version), Self::Offchain(_) => vec![], + Self::Subgraph(_) => vec![], // TODO(krishna) } } @@ -254,6 +314,7 @@ impl DataSource { match self { Self::Onchain(_) => CausalityRegion::ONCHAIN, Self::Offchain(ds) => ds.causality_region, + Self::Subgraph(_) => CausalityRegion::ONCHAIN, } } } @@ -262,6 +323,7 @@ impl DataSource { pub enum UnresolvedDataSource { Onchain(C::UnresolvedDataSource), Offchain(offchain::UnresolvedDataSource), + Subgraph(subgraph::UnresolvedDataSource), } impl UnresolvedDataSource { @@ -276,6 +338,10 @@ impl UnresolvedDataSource { .resolve(resolver, logger, manifest_idx) .await .map(DataSource::Onchain), + Self::Subgraph(unresolved) => unresolved + .resolve::(resolver, logger, manifest_idx) + .await + .map(DataSource::Subgraph), Self::Offchain(_unresolved) => { anyhow::bail!( "static file data sources are not yet supported, \\ @@ -299,6 +365,7 @@ pub struct DataSourceTemplateInfo { pub enum DataSourceTemplate { Onchain(C::DataSourceTemplate), Offchain(offchain::DataSourceTemplate), + Subgraph(subgraph::DataSourceTemplate), } impl DataSourceTemplate { @@ -306,6 +373,7 @@ impl DataSourceTemplate { match self { DataSourceTemplate::Onchain(template) => template.info(), DataSourceTemplate::Offchain(template) => template.clone().into(), + DataSourceTemplate::Subgraph(template) => template.clone().into(), } } @@ -313,6 +381,7 @@ impl DataSourceTemplate { match self { Self::Onchain(ds) => Some(ds), Self::Offchain(_) => None, + Self::Subgraph(_) => todo!(), // TODO(krishna) } } @@ -320,6 +389,7 @@ impl DataSourceTemplate { match self { Self::Onchain(_) => None, Self::Offchain(t) => Some(t), + Self::Subgraph(_) => todo!(), // TODO(krishna) } } @@ -327,6 +397,7 @@ impl DataSourceTemplate { match self { Self::Onchain(ds) => Some(ds), Self::Offchain(_) => None, + Self::Subgraph(_) => todo!(), // TODO(krishna) } } @@ -334,6 +405,7 @@ impl DataSourceTemplate { match self { Self::Onchain(ds) => 
&ds.name(), Self::Offchain(ds) => &ds.name, + Self::Subgraph(ds) => &ds.name, } } @@ -341,6 +413,7 @@ impl DataSourceTemplate { match self { Self::Onchain(ds) => ds.api_version(), Self::Offchain(ds) => ds.mapping.api_version.clone(), + Self::Subgraph(ds) => ds.mapping.api_version.clone(), } } @@ -348,6 +421,7 @@ impl DataSourceTemplate { match self { Self::Onchain(ds) => ds.runtime(), Self::Offchain(ds) => Some(ds.mapping.runtime.clone()), + Self::Subgraph(ds) => Some(ds.mapping.runtime.clone()), } } @@ -355,6 +429,7 @@ impl DataSourceTemplate { match self { Self::Onchain(ds) => ds.manifest_idx(), Self::Offchain(ds) => ds.manifest_idx, + Self::Subgraph(ds) => ds.manifest_idx, } } @@ -362,6 +437,7 @@ impl DataSourceTemplate { match self { Self::Onchain(ds) => ds.kind().to_string(), Self::Offchain(ds) => ds.kind.to_string(), + Self::Subgraph(ds) => ds.kind.clone(), } } } @@ -370,6 +446,7 @@ impl DataSourceTemplate { pub enum UnresolvedDataSourceTemplate { Onchain(C::UnresolvedDataSourceTemplate), Offchain(offchain::UnresolvedDataSourceTemplate), + Subgraph(subgraph::UnresolvedDataSourceTemplate), } impl Default for UnresolvedDataSourceTemplate { @@ -395,6 +472,10 @@ impl UnresolvedDataSourceTemplate { .resolve(resolver, logger, manifest_idx, schema) .await .map(DataSourceTemplate::Offchain), + Self::Subgraph(ds) => ds + .resolve(resolver, logger, manifest_idx) + .await + .map(DataSourceTemplate::Subgraph), } } } @@ -475,6 +556,7 @@ impl TriggerWithHandler { pub enum TriggerData { Onchain(C::TriggerData), Offchain(offchain::TriggerData), + Subgraph(subgraph::TriggerData), } impl TriggerData { @@ -482,6 +564,7 @@ impl TriggerData { match self { Self::Onchain(trigger) => trigger.error_context(), Self::Offchain(trigger) => format!("{:?}", trigger.source), + Self::Subgraph(trigger) => format!("{:?}", trigger.source), } } } @@ -490,6 +573,7 @@ impl TriggerData { pub enum MappingTrigger { Onchain(C::MappingTrigger), Offchain(offchain::TriggerData), + 
Subgraph(subgraph::MappingEntityTrigger), } impl MappingTrigger { @@ -497,6 +581,7 @@ impl MappingTrigger { match self { Self::Onchain(trigger) => Some(trigger.error_context()), Self::Offchain(_) => None, // TODO: Add error context for offchain triggers + Self::Subgraph(_) => None, // TODO(krishna) } } @@ -504,6 +589,7 @@ impl MappingTrigger { match self { Self::Onchain(trigger) => Some(trigger), Self::Offchain(_) => None, + Self::Subgraph(_) => None, // TODO(krishna) } } } @@ -515,6 +601,7 @@ macro_rules! clone_data_source { match self { Self::Onchain(ds) => Self::Onchain(ds.clone()), Self::Offchain(ds) => Self::Offchain(ds.clone()), + Self::Subgraph(ds) => Self::Subgraph(ds.clone()), } } } @@ -541,6 +628,10 @@ macro_rules! deserialize_data_source { offchain::$t::deserialize(map.into_deserializer()) .map_err(serde::de::Error::custom) .map($t::Offchain) + } else if SUBGRAPH_DS_KIND == kind { + subgraph::$t::deserialize(map.into_deserializer()) + .map_err(serde::de::Error::custom) + .map($t::Subgraph) } else if (&C::KIND.to_string() == kind) || C::ALIASES.contains(&kind) { C::$t::deserialize(map.into_deserializer()) .map_err(serde::de::Error::custom) diff --git a/graph/src/data_source/subgraph.rs b/graph/src/data_source/subgraph.rs new file mode 100644 index 00000000000..9e120a4c82c --- /dev/null +++ b/graph/src/data_source/subgraph.rs @@ -0,0 +1,495 @@ +use crate::{ + blockchain::{block_stream::EntitySourceOperation, Block, Blockchain}, + components::{link_resolver::LinkResolver, store::BlockNumber}, + data::{ + subgraph::{ + calls_host_fn, SubgraphManifest, UnresolvedSubgraphManifest, LATEST_VERSION, + SPEC_VERSION_1_3_0, + }, + value::Word, + }, + data_source::{self, common::DeclaredCall}, + ensure, + prelude::{CheapClone, DataSourceContext, DeploymentHash, Link}, + schema::TypeKind, +}; +use anyhow::{anyhow, Context, Error, Result}; +use futures03::{stream::FuturesOrdered, TryStreamExt}; +use serde::Deserialize; +use slog::{info, Logger}; +use std::{fmt, 
sync::Arc}; + +use super::{ + common::{CallDecls, FindMappingABI, MappingABI, UnresolvedMappingABI}, + DataSourceTemplateInfo, TriggerWithHandler, +}; + +pub const SUBGRAPH_DS_KIND: &str = "subgraph"; + +const ENTITY_HANDLER_KINDS: &str = "entity"; + +#[derive(Debug, Clone)] +pub struct DataSource { + pub kind: String, + pub name: String, + pub network: String, + pub manifest_idx: u32, + pub source: Source, + pub mapping: Mapping, + pub context: Arc>, + pub creation_block: Option, +} + +impl DataSource { + pub fn new( + kind: String, + name: String, + network: String, + manifest_idx: u32, + source: Source, + mapping: Mapping, + context: Arc>, + creation_block: Option, + ) -> Self { + Self { + kind, + name, + network, + manifest_idx, + source, + mapping, + context, + creation_block, + } + } + + pub fn min_spec_version(&self) -> semver::Version { + SPEC_VERSION_1_3_0 + } + + pub fn handler_kind(&self) -> &str { + ENTITY_HANDLER_KINDS + } + + pub fn network(&self) -> Option<&str> { + Some(&self.network) + } + + pub fn match_and_decode( + &self, + block: &Arc, + trigger: &TriggerData, + ) -> Result>>> { + if self.source.address != trigger.source { + return Ok(None); + } + + let mut matching_handlers: Vec<_> = self + .mapping + .handlers + .iter() + .filter(|handler| handler.entity == trigger.entity_type()) + .collect(); + + // Get the matching handler if any + let handler = match matching_handlers.pop() { + Some(handler) => handler, + None => return Ok(None), + }; + + ensure!( + matching_handlers.is_empty(), + format!( + "Multiple handlers defined for entity `{}`, only one is supported", + trigger.entity_type() + ) + ); + + let calls = + DeclaredCall::from_entity_trigger(&self.mapping, &handler.calls, &trigger.entity)?; + let mapping_trigger = MappingEntityTrigger { + data: trigger.clone(), + calls, + }; + + Ok(Some(TriggerWithHandler::new( + data_source::MappingTrigger::Subgraph(mapping_trigger), + handler.handler.clone(), + block.ptr(), + block.timestamp(), + ))) + } 
+ + pub fn address(&self) -> Option> { + Some(self.source.address().to_bytes()) + } + + pub fn source_subgraph(&self) -> DeploymentHash { + self.source.address() + } +} + +pub type Base64 = Word; + +#[derive(Clone, Debug, Default, Hash, Eq, PartialEq, Deserialize)] +pub struct Source { + pub address: DeploymentHash, + #[serde(default)] + pub start_block: BlockNumber, +} + +impl Source { + /// The concept of an address may or not make sense for a subgraph data source, but graph node + /// will use this in a few places where some sort of not necessarily unique id is useful: + /// 1. This is used as the value to be returned to mappings from the `dataSource.address()` host + /// function, so changing this is a breaking change. + /// 2. This is used to match with triggers with hosts in `fn hosts_for_trigger`, so make sure + /// the `source` of the data source is equal the `source` of the `TriggerData`. + pub fn address(&self) -> DeploymentHash { + self.address.clone() + } +} + +#[derive(Clone, Debug)] +pub struct Mapping { + pub language: String, + pub api_version: semver::Version, + pub abis: Vec>, + pub entities: Vec, + pub handlers: Vec, + pub runtime: Arc>, + pub link: Link, +} + +impl Mapping { + pub fn requires_archive(&self) -> anyhow::Result { + calls_host_fn(&self.runtime, "ethereum.call") + } +} + +impl FindMappingABI for Mapping { + fn find_abi(&self, abi_name: &str) -> Result, Error> { + Ok(self + .abis + .iter() + .find(|abi| abi.name == abi_name) + .ok_or_else(|| anyhow!("No ABI entry with name `{}` found", abi_name))? 
+ .cheap_clone()) + } +} + +#[derive(Clone, Debug, Hash, Eq, PartialEq, Deserialize)] +pub struct EntityHandler { + pub handler: String, + pub entity: String, + #[serde(default)] + pub calls: CallDecls, +} + +#[derive(Clone, Debug, Default, Eq, PartialEq, Deserialize)] +pub struct UnresolvedDataSource { + pub kind: String, + pub name: String, + pub network: String, + pub source: UnresolvedSource, + pub mapping: UnresolvedMapping, + pub context: Option, +} + +#[derive(Clone, Debug, Default, Hash, Eq, PartialEq, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct UnresolvedSource { + address: DeploymentHash, + #[serde(default)] + start_block: BlockNumber, +} + +#[derive(Clone, Debug, Default, Hash, Eq, PartialEq, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct UnresolvedMapping { + pub api_version: String, + pub language: String, + pub file: Link, + pub handlers: Vec, + pub abis: Option>, + pub entities: Vec, +} + +impl UnresolvedDataSource { + fn validate_mapping_entities( + mapping_entities: &[String], + source_manifest: &SubgraphManifest, + ) -> Result<(), Error> { + for entity in mapping_entities { + let type_kind = source_manifest.schema.kind_of_declared_type(&entity); + + match type_kind { + Some(TypeKind::Interface) => { + return Err(anyhow!( + "Entity {} is an interface and cannot be used as a mapping entity", + entity + )); + } + Some(TypeKind::Aggregation) => { + return Err(anyhow!( + "Entity {} is an aggregation and cannot be used as a mapping entity", + entity + )); + } + None => { + return Err(anyhow!("Entity {} not found in source manifest", entity)); + } + Some(TypeKind::Object) => {} + } + } + Ok(()) + } + + async fn resolve_source_manifest( + &self, + resolver: &Arc, + logger: &Logger, + ) -> Result>, Error> { + let source_raw = resolver + .cat(logger, &self.source.address.to_ipfs_link()) + .await + .context("Failed to resolve source subgraph manifest")?; + + let source_raw: serde_yaml::Mapping = 
serde_yaml::from_slice(&source_raw) + .context("Failed to parse source subgraph manifest as YAML")?; + + let deployment_hash = self.source.address.clone(); + + let source_manifest = UnresolvedSubgraphManifest::::parse(deployment_hash, source_raw) + .context("Failed to parse source subgraph manifest")?; + + source_manifest + .resolve(resolver, logger, LATEST_VERSION.clone()) + .await + .context("Failed to resolve source subgraph manifest") + .map(Arc::new) + } + + #[allow(dead_code)] + pub(super) async fn resolve( + self, + resolver: &Arc, + logger: &Logger, + manifest_idx: u32, + ) -> Result { + info!(logger, "Resolve subgraph data source"; + "name" => &self.name, + "kind" => &self.kind, + "source" => format_args!("{:?}", &self.source), + ); + + let kind = self.kind.clone(); + let source_manifest = self.resolve_source_manifest::(resolver, logger).await?; + let source_spec_version = &source_manifest.spec_version; + + if source_manifest + .data_sources + .iter() + .any(|ds| matches!(ds, crate::data_source::DataSource::Subgraph(_))) + { + return Err(anyhow!("Nested subgraph data sources are not supported.")); + } + + if source_spec_version < &SPEC_VERSION_1_3_0 { + return Err(anyhow!( + "Source subgraph manifest spec version {} is not supported, minimum supported version is {}", + source_spec_version, + SPEC_VERSION_1_3_0 + )); + } + + let pruning_enabled = match source_manifest.indexer_hints.as_ref() { + None => false, + Some(hints) => hints.prune.is_some(), + }; + + if pruning_enabled { + return Err(anyhow!( + "Pruning is enabled for source subgraph, which is not supported" + )); + } + + let mapping_entities: Vec = self + .mapping + .handlers + .iter() + .map(|handler| handler.entity.clone()) + .collect(); + + Self::validate_mapping_entities(&mapping_entities, &source_manifest)?; + + let source = Source { + address: self.source.address, + start_block: self.source.start_block, + }; + + Ok(DataSource { + manifest_idx, + kind, + name: self.name, + network: 
self.network, + source, + mapping: self.mapping.resolve(resolver, logger).await?, + context: Arc::new(self.context), + creation_block: None, + }) + } +} + +impl UnresolvedMapping { + pub async fn resolve( + self, + resolver: &Arc, + logger: &Logger, + ) -> Result { + info!(logger, "Resolve subgraph ds mapping"; "link" => &self.file.link); + + // Resolve each ABI and collect the results + let abis = match self.abis { + Some(abis) => { + abis.into_iter() + .map(|unresolved_abi| { + let resolver = Arc::clone(resolver); + let logger = logger.clone(); + async move { + let resolved_abi = unresolved_abi.resolve(&resolver, &logger).await?; + Ok::<_, Error>(Arc::new(resolved_abi)) + } + }) + .collect::>() + .try_collect::>() + .await? + } + None => Vec::new(), + }; + + Ok(Mapping { + language: self.language, + api_version: semver::Version::parse(&self.api_version)?, + entities: self.entities, + handlers: self.handlers, + abis, + runtime: Arc::new(resolver.cat(logger, &self.file).await?), + link: self.file, + }) + } +} + +#[derive(Clone, Debug, Deserialize)] +pub struct UnresolvedDataSourceTemplate { + pub kind: String, + pub network: Option, + pub name: String, + pub mapping: UnresolvedMapping, +} + +#[derive(Clone, Debug)] +pub struct DataSourceTemplate { + pub kind: String, + pub network: Option, + pub name: String, + pub manifest_idx: u32, + pub mapping: Mapping, +} + +impl Into for DataSourceTemplate { + fn into(self) -> DataSourceTemplateInfo { + let DataSourceTemplate { + kind, + network: _, + name, + manifest_idx, + mapping, + } = self; + + DataSourceTemplateInfo { + api_version: mapping.api_version.clone(), + runtime: Some(mapping.runtime), + name, + manifest_idx: Some(manifest_idx), + kind: kind.to_string(), + } + } +} + +impl UnresolvedDataSourceTemplate { + pub async fn resolve( + self, + resolver: &Arc, + logger: &Logger, + manifest_idx: u32, + ) -> Result { + let kind = self.kind; + + let mapping = self + .mapping + .resolve(resolver, logger) + .await + 
.with_context(|| format!("failed to resolve data source template {}", self.name))?; + + Ok(DataSourceTemplate { + kind, + network: self.network, + name: self.name, + manifest_idx, + mapping, + }) + } +} + +#[derive(Clone, PartialEq, Debug)] +pub struct MappingEntityTrigger { + pub data: TriggerData, + pub calls: Vec, +} + +#[derive(Clone, PartialEq, Eq)] +pub struct TriggerData { + pub source: DeploymentHash, + pub entity: EntitySourceOperation, + pub source_idx: u32, +} + +impl TriggerData { + pub fn new(source: DeploymentHash, entity: EntitySourceOperation, source_idx: u32) -> Self { + Self { + source, + entity, + source_idx, + } + } + + pub fn entity_type(&self) -> &str { + self.entity.entity_type.as_str() + } +} + +impl Ord for TriggerData { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + match self.source_idx.cmp(&other.source_idx) { + std::cmp::Ordering::Equal => self.entity.vid.cmp(&other.entity.vid), + ord => ord, + } + } +} + +impl PartialOrd for TriggerData { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl fmt::Debug for TriggerData { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!( + f, + "TriggerData {{ source: {:?}, entity: {:?} }}", + self.source, self.entity, + ) + } +} diff --git a/graph/src/env/mod.rs b/graph/src/env/mod.rs index b97e44ef9a1..4383ce17b5c 100644 --- a/graph/src/env/mod.rs +++ b/graph/src/env/mod.rs @@ -240,6 +240,13 @@ pub struct EnvVars { pub firehose_disable_extended_blocks_for_chains: Vec, pub block_write_capacity: usize, + + /// Set by the environment variable `GRAPH_FIREHOSE_FETCH_BLOCK_RETRY_LIMIT`. + /// The default value is 10. + pub firehose_block_fetch_retry_limit: usize, + /// Set by the environment variable `GRAPH_FIREHOSE_FETCH_BLOCK_TIMEOUT_SECS`. + /// The default value is 60 seconds. 
+ pub firehose_block_fetch_timeout: u64, } impl EnvVars { @@ -330,6 +337,8 @@ impl EnvVars { inner.firehose_disable_extended_blocks_for_chains, ), block_write_capacity: inner.block_write_capacity.0, + firehose_block_fetch_retry_limit: inner.firehose_block_fetch_retry_limit, + firehose_block_fetch_timeout: inner.firehose_block_fetch_timeout, }) } @@ -390,7 +399,7 @@ struct Inner { default = "false" )] allow_non_deterministic_fulltext_search: EnvVarBoolean, - #[envconfig(from = "GRAPH_MAX_SPEC_VERSION", default = "1.2.0")] + #[envconfig(from = "GRAPH_MAX_SPEC_VERSION", default = "1.3.0")] max_spec_version: Version, #[envconfig(from = "GRAPH_LOAD_WINDOW_SIZE", default = "300")] load_window_size_in_secs: u64, @@ -493,6 +502,10 @@ struct Inner { firehose_disable_extended_blocks_for_chains: Option, #[envconfig(from = "GRAPH_NODE_BLOCK_WRITE_CAPACITY", default = "4_000_000_000")] block_write_capacity: NoUnderscores, + #[envconfig(from = "GRAPH_FIREHOSE_FETCH_BLOCK_RETRY_LIMIT", default = "10")] + firehose_block_fetch_retry_limit: usize, + #[envconfig(from = "GRAPH_FIREHOSE_FETCH_BLOCK_TIMEOUT_SECS", default = "60")] + firehose_block_fetch_timeout: u64, } #[derive(Clone, Debug)] diff --git a/graph/src/env/store.rs b/graph/src/env/store.rs index ce574c94253..af0f076978f 100644 --- a/graph/src/env/store.rs +++ b/graph/src/env/store.rs @@ -128,6 +128,11 @@ pub struct EnvVarsStore { /// sufficiently, probably after 2024-12-01 /// Defaults to `false`, i.e. using the new fixed behavior pub last_rollup_from_poi: bool, + /// Safety switch to increase the number of columns used when + /// calculating the chunk size in `InsertQuery::chunk_size`. 
This can be + /// used to work around Postgres errors complaining 'number of + /// parameters must be between 0 and 65535' when inserting entities + pub insert_extra_cols: usize, } // This does not print any values avoid accidentally leaking any sensitive env vars @@ -177,6 +182,7 @@ impl From for EnvVarsStore { use_brin_for_all_query_types: x.use_brin_for_all_query_types, disable_block_cache_for_lookup: x.disable_block_cache_for_lookup, last_rollup_from_poi: x.last_rollup_from_poi, + insert_extra_cols: x.insert_extra_cols, } } } @@ -240,6 +246,8 @@ pub struct InnerStore { disable_block_cache_for_lookup: bool, #[envconfig(from = "GRAPH_STORE_LAST_ROLLUP_FROM_POI", default = "false")] last_rollup_from_poi: bool, + #[envconfig(from = "GRAPH_STORE_INSERT_EXTRA_COLS", default = "0")] + insert_extra_cols: usize, } #[derive(Clone, Copy, Debug)] diff --git a/graph/src/firehose/endpoints.rs b/graph/src/firehose/endpoints.rs index 72d3f986c9c..ebf27faa5a1 100644 --- a/graph/src/firehose/endpoints.rs +++ b/graph/src/firehose/endpoints.rs @@ -16,7 +16,7 @@ use async_trait::async_trait; use futures03::StreamExt; use http0::uri::{Scheme, Uri}; use itertools::Itertools; -use slog::Logger; +use slog::{error, info, trace, Logger}; use std::{collections::HashMap, fmt::Display, ops::ControlFlow, sync::Arc, time::Duration}; use tokio::sync::OnceCell; use tonic::codegen::InterceptedService; @@ -33,6 +33,7 @@ use crate::components::network_provider::NetworkDetails; use crate::components::network_provider::ProviderCheckStrategy; use crate::components::network_provider::ProviderManager; use crate::components::network_provider::ProviderName; +use crate::prelude::retry; /// This is constant because we found this magic number of connections after /// which the grpc connections start to hang. 
@@ -359,6 +360,142 @@ impl FirehoseEndpoint { } } + pub async fn get_block_by_ptr( + &self, + ptr: &BlockPtr, + logger: &Logger, + ) -> Result + where + M: prost::Message + BlockchainBlock + Default + 'static, + { + debug!( + logger, + "Connecting to firehose to retrieve block for ptr {}", ptr; + "provider" => self.provider.as_str(), + ); + + let req = firehose::SingleBlockRequest { + transforms: [].to_vec(), + reference: Some( + firehose::single_block_request::Reference::BlockHashAndNumber( + firehose::single_block_request::BlockHashAndNumber { + hash: ptr.hash.to_string(), + num: ptr.number as u64, + }, + ), + ), + }; + + let mut client = self.new_fetch_client(); + match client.block(req).await { + Ok(v) => Ok(M::decode( + v.get_ref().block.as_ref().unwrap().value.as_ref(), + )?), + Err(e) => return Err(anyhow::format_err!("firehose error {}", e)), + } + } + + pub async fn get_block_by_number( + &self, + number: u64, + logger: &Logger, + ) -> Result + where + M: prost::Message + BlockchainBlock + Default + 'static, + { + debug!( + logger, + "Connecting to firehose to retrieve block for number {}", number; + "provider" => self.provider.as_str(), + ); + + let req = firehose::SingleBlockRequest { + transforms: [].to_vec(), + reference: Some(firehose::single_block_request::Reference::BlockNumber( + firehose::single_block_request::BlockNumber { num: number }, + )), + }; + + let mut client = self.new_fetch_client(); + match client.block(req).await { + Ok(v) => Ok(M::decode( + v.get_ref().block.as_ref().unwrap().value.as_ref(), + )?), + Err(e) => return Err(anyhow::format_err!("firehose error {}", e)), + } + } + + pub async fn load_blocks_by_numbers( + self: Arc, + numbers: Vec, + logger: &Logger, + ) -> Result, anyhow::Error> + where + M: prost::Message + BlockchainBlock + Default + 'static, + { + let mut blocks = Vec::with_capacity(numbers.len()); + + for number in numbers { + let provider_name = self.provider.as_str(); + + trace!( + logger, + "Loading block for block 
number {}", number; + "provider" => provider_name, + ); + + let retry_log_message = format!("get_block_by_number for block {}", number); + let endpoint_for_retry = self.cheap_clone(); + + let logger_for_retry = logger.clone(); + let logger_for_error = logger.clone(); + + let block = retry(retry_log_message, &logger_for_retry) + .limit(ENV_VARS.firehose_block_fetch_retry_limit) + .timeout_secs(ENV_VARS.firehose_block_fetch_timeout) + .run(move || { + let e = endpoint_for_retry.cheap_clone(); + let l = logger_for_retry.clone(); + async move { e.get_block_by_number::(number, &l).await } + }) + .await; + + match block { + Ok(block) => { + blocks.push(block); + } + Err(e) => { + error!( + logger_for_error, + "Failed to load block number {}: {}", number, e; + "provider" => provider_name, + ); + return Err(anyhow::format_err!( + "failed to load block number {}: {}", + number, + e + )); + } + } + } + + Ok(blocks) + } + + pub async fn genesis_block_ptr(&self, logger: &Logger) -> Result + where + M: prost::Message + BlockchainBlock + Default + 'static, + { + info!(logger, "Requesting genesis block from firehose"; + "provider" => self.provider.as_str()); + + // We use 0 here to mean the genesis block of the chain. Firehose + // when seeing start block number 0 will always return the genesis + // block of the chain, even if the chain's start block number is + // not starting at block #0. + self.block_ptr_for_number::(logger, 0).await + } + pub async fn block_ptr_for_number( &self, logger: &Logger, diff --git a/graph/src/runtime/gas/costs.rs b/graph/src/runtime/gas/costs.rs index 6436fc2102d..06decdf03aa 100644 --- a/graph/src/runtime/gas/costs.rs +++ b/graph/src/runtime/gas/costs.rs @@ -83,3 +83,10 @@ pub const JSON_FROM_BYTES: GasOp = GasOp { base_cost: DEFAULT_BASE_COST, size_mult: DEFAULT_GAS_PER_BYTE * 100, }; + +// Deeply nested YAML can take up more than 100 times the memory of the serialized format. +// Multiplying the size cost by 100 accounts for this. 
+pub const YAML_FROM_BYTES: GasOp = GasOp { + base_cost: DEFAULT_BASE_COST, + size_mult: DEFAULT_GAS_PER_BYTE * 100, +}; diff --git a/graph/src/runtime/mod.rs b/graph/src/runtime/mod.rs index d20d1eccde3..5622d37c100 100644 --- a/graph/src/runtime/mod.rs +++ b/graph/src/runtime/mod.rs @@ -368,7 +368,20 @@ pub enum IndexForAscTypeId { // ... // LastStarknetType = 4499, - // Reserved discriminant space for a future blockchain type IDs: [4,500, 5,499] + // Subgraph Data Source types + AscEntityTrigger = 4500, + + // Reserved discriminant space for YAML type IDs: [5,500, 6,499] + YamlValue = 5500, + YamlTaggedValue = 5501, + YamlTypedMapEntryValueValue = 5502, + YamlTypedMapValueValue = 5503, + YamlArrayValue = 5504, + YamlArrayTypedMapEntryValueValue = 5505, + YamlWrappedValue = 5506, + YamlResultValueBool = 5507, + + // Reserved discriminant space for a future blockchain type IDs: [6,500, 7,499] // // Generated with the following shell script: // diff --git a/graph/src/schema/entity_type.rs b/graph/src/schema/entity_type.rs index cee762afb5b..098b48362b9 100644 --- a/graph/src/schema/entity_type.rs +++ b/graph/src/schema/entity_type.rs @@ -150,6 +150,13 @@ impl EntityType { pub fn is_object_type(&self) -> bool { self.schema.is_object_type(self.atom) } + + /// Whether the table for this entity type uses a sequence for the `vid` or whether + /// `graph-node` sets them explicitly. 
See also [`InputSchema.strict_vid_order()`] + pub fn has_vid_seq(&self) -> bool { + // Currently the agregations entities don't have VIDs in insertion order + self.schema.strict_vid_order() && self.is_object_type() + } } impl fmt::Display for EntityType { diff --git a/graph/src/schema/input/mod.rs b/graph/src/schema/input/mod.rs index 84897299785..5ff8ddcda2f 100644 --- a/graph/src/schema/input/mod.rs +++ b/graph/src/schema/input/mod.rs @@ -17,6 +17,7 @@ use crate::data::graphql::{DirectiveExt, DocumentExt, ObjectTypeExt, TypeExt, Va use crate::data::store::{ self, EntityValidationError, IdType, IntoEntityIterator, TryIntoEntityIterator, ValueType, ID, }; +use crate::data::subgraph::SPEC_VERSION_1_3_0; use crate::data::value::Word; use crate::derive::CheapClone; use crate::prelude::q::Value; @@ -35,6 +36,7 @@ pub(crate) const POI_OBJECT: &str = "Poi$"; const POI_DIGEST: &str = "digest"; /// The name of the PoI attribute for storing the block time const POI_BLOCK_TIME: &str = "blockTime"; +pub(crate) const VID_FIELD: &str = "vid"; pub mod kw { pub const ENTITY: &str = "entity"; @@ -954,6 +956,7 @@ pub struct Inner { pool: Arc, /// A list of all timeseries types by interval agg_mappings: Box<[AggregationMapping]>, + spec_version: Version, } impl InputSchema { @@ -1041,6 +1044,7 @@ impl InputSchema { enum_map, pool, agg_mappings, + spec_version: spec_version.clone(), }), }) } @@ -1584,6 +1588,14 @@ impl InputSchema { }?; Some(EntityType::new(self.cheap_clone(), obj_type.name)) } + + /// How the values for the VID field are generated. + /// When this is `false`, this subgraph uses the old way of autoincrementing `vid` in the database. + /// When it is `true`, `graph-node` sets the `vid` explicitly to a number based on block number + /// and the order in which entities are written, and comparing by `vid` will order entities by that order. 
+ pub fn strict_vid_order(&self) -> bool { + self.inner.spec_version >= SPEC_VERSION_1_3_0 + } } /// Create a new pool that contains the names of all the types defined @@ -1597,6 +1609,8 @@ fn atom_pool(document: &s::Document) -> AtomPool { pool.intern(POI_DIGEST); pool.intern(POI_BLOCK_TIME); + pool.intern(VID_FIELD); + for definition in &document.definitions { match definition { s::Definition::TypeDefinition(typedef) => match typedef { diff --git a/graph/src/schema/mod.rs b/graph/src/schema/mod.rs index af4de2e57f6..0b1a12cd338 100644 --- a/graph/src/schema/mod.rs +++ b/graph/src/schema/mod.rs @@ -21,7 +21,7 @@ pub mod ast; mod entity_key; mod entity_type; mod fulltext; -mod input; +pub(crate) mod input; pub use api::{is_introspection_field, APISchemaError, INTROSPECTION_QUERY_TYPE}; diff --git a/graph/src/util/intern.rs b/graph/src/util/intern.rs index c29f4a3672e..62ff3b4618f 100644 --- a/graph/src/util/intern.rs +++ b/graph/src/util/intern.rs @@ -308,6 +308,45 @@ impl Object { } } +impl Object { + fn len_ignore_atom(&self, atom: &Atom) -> usize { + // Because of tombstones and the ignored atom, we can't just return `self.entries.len()`. 
+ self.entries + .iter() + .filter(|entry| entry.key != TOMBSTONE_KEY && entry.key != *atom) + .count() + } + + /// Check for equality while ignoring one particular element + pub fn eq_ignore_key(&self, other: &Self, ignore_key: &str) -> bool { + let ignore = self.pool.lookup(ignore_key); + let len1 = if let Some(to_ignore) = ignore { + self.len_ignore_atom(&to_ignore) + } else { + self.len() + }; + let len2 = if let Some(to_ignore) = other.pool.lookup(ignore_key) { + other.len_ignore_atom(&to_ignore) + } else { + other.len() + }; + if len1 != len2 { + return false; + } + + if self.same_pool(other) { + self.entries + .iter() + .filter(|e| e.key != TOMBSTONE_KEY && ignore.map_or(true, |ig| e.key != ig)) + .all(|Entry { key, value }| other.get_by_atom(key).map_or(false, |o| o == value)) + } else { + self.iter() + .filter(|(key, _)| *key != ignore_key) + .all(|(key, value)| other.get(key).map_or(false, |o| o == value)) + } + } +} + impl Object { /// Remove `key` from the object and return the value that was /// associated with the `key`. The entry is actually not removed for diff --git a/graph/src/util/mod.rs b/graph/src/util/mod.rs index 68c86daea3c..4cdf52a82a5 100644 --- a/graph/src/util/mod.rs +++ b/graph/src/util/mod.rs @@ -12,6 +12,8 @@ pub mod error; pub mod stats; +pub mod ogive; + pub mod cache_weight; pub mod timed_rw_lock; diff --git a/graph/src/util/ogive.rs b/graph/src/util/ogive.rs new file mode 100644 index 00000000000..476bfd76ce8 --- /dev/null +++ b/graph/src/util/ogive.rs @@ -0,0 +1,279 @@ +use std::ops::RangeInclusive; + +use crate::{constraint_violation, prelude::StoreError}; + +/// A helper to deal with cumulative histograms, also known as ogives. This +/// implementation is restricted to histograms where each bin has the same +/// size. As a cumulative function of a histogram, an ogive is a piecewise +/// linear function `f` and since it is strictly monotonically increasing, +/// it has an inverse `g`. 
+/// +/// For the given `points`, `f(points[i]) = i * bin_size` and `f` is the +/// piecewise linear interpolant between those points. The inverse `g` is +/// the piecewise linear interpolant of `g(i * bin_size) = points[i]`. Note +/// that that means that `f` divides the y-axis into `points.len()` equal +/// parts. +/// +/// The word 'ogive' is somewhat obscure, but has a lot fewer letters than +/// 'piecewise linear function'. Copolit also claims that it is also a lot +/// more fun to say. +pub struct Ogive { + /// The breakpoints of the piecewise linear function + points: Vec, + /// The size of each bin; the linear piece from `points[i]` to + /// `points[i+1]` rises by this much + bin_size: f64, + /// The range of the ogive, i.e., the minimum and maximum entries from + /// points + range: RangeInclusive, +} + +impl Ogive { + /// Create an ogive from a histogram with breaks at the given points and + /// a total count of `total` entries. As a function, the ogive is 0 at + /// `points[0]` and `total` at `points[points.len() - 1]`. + /// + /// The `points` must have at least one entry. The `points` are sorted + /// and deduplicated, i.e., they don't have to be in ascending order. + pub fn from_equi_histogram(mut points: Vec, total: usize) -> Result { + if points.is_empty() { + return Err(constraint_violation!( + "histogram must have at least one point" + )); + } + + points.sort_unstable(); + points.dedup(); + + let bins = points.len() - 1; + let bin_size = total as f64 / bins as f64; + let range = points[0]..=points[bins]; + let points = points.into_iter().map(|p| p as f64).collect(); + Ok(Self { + points, + bin_size, + range, + }) + } + + pub fn start(&self) -> i64 { + *self.range.start() + } + + pub fn end(&self) -> i64 { + *self.range.end() + } + + /// Find the next point `next` such that there are `size` entries + /// between `point` and `next`, i.e., such that `f(next) - f(point) = + /// size`. + /// + /// It is an error if `point` is smaller than `points[0]`. 
If `point` is + /// bigger than `points.last()`, that is returned instead. + /// + /// The method calculates `g(f(point) + size)` + pub fn next_point(&self, point: i64, size: usize) -> Result { + if point >= *self.range.end() { + return Ok(*self.range.end()); + } + // This can only fail if point < self.range.start + self.check_in_range(point)?; + + let point_value = self.value(point)?; + let next_value = point_value + size as i64; + let next_point = self.inverse(next_value)?; + Ok(next_point) + } + + /// Return the index of the support point immediately preceding `point`. + /// It is an error if `point` is outside the range of points of this + /// ogive; this also implies that the returned index is always strictly + /// less than `self.points.len() - 1` + fn interval_start(&self, point: i64) -> Result { + self.check_in_range(point)?; + + let point = point as f64; + let idx = self + .points + .iter() + .position(|&p| point < p) + .unwrap_or(self.points.len() - 1) + - 1; + Ok(idx) + } + + /// Return the value of the ogive at `point`, i.e., `f(point)`. It is an + /// error if `point` is outside the range of points of this ogive. + fn value(&self, point: i64) -> Result { + if self.points.len() == 1 { + return Ok(*self.range.end()); + } + + let idx = self.interval_start(point)?; + let bin_size = self.bin_size as f64; + let (a, b) = (self.points[idx], self.points[idx + 1]); + let point = point as f64; + let value = (idx as f64 + (point - a) / (b - a)) * bin_size; + Ok(value as i64) + } + + /// Return the value of the inverse ogive at `value`, i.e., `g(value)`. + /// It is an error if `value` is negative. If `value` is greater than + /// the total count of the ogive, the maximum point of the ogive is + /// returned. 
+ fn inverse(&self, value: i64) -> Result { + let value = value as f64; + if value < 0.0 { + return Err(constraint_violation!("value {} can not be negative", value)); + } + let idx = (value / self.bin_size) as usize; + if idx >= self.points.len() - 1 { + return Ok(*self.range.end()); + } + let (a, b) = (self.points[idx] as f64, self.points[idx + 1] as f64); + let lambda = (value - idx as f64 * self.bin_size) / self.bin_size; + let x = (1.0 - lambda) * a + lambda * b; + Ok(x as i64) + } + + fn check_in_range(&self, point: i64) -> Result<(), StoreError> { + if !self.range.contains(&point) { + return Err(constraint_violation!( + "point {} is outside of the range [{}, {}]", + point, + self.range.start(), + self.range.end(), + )); + } + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn simple() { + // This is just the linear function y = (70 / 5) * (x - 10) + let points: Vec = vec![10, 20, 30, 40, 50, 60]; + let ogive = Ogive::from_equi_histogram(points, 700).unwrap(); + + // The function represented by `points` + fn f(x: i64) -> i64 { + 70 * (x - 10) / 5 + } + + // The inverse of `f` + fn g(x: i64) -> i64 { + x * 5 / 70 + 10 + } + + // Check that the ogive is correct + assert_eq!(ogive.bin_size, 700 as f64 / 5 as f64); + assert_eq!(ogive.range, 10..=60); + + // Test value method + for point in vec![20, 30, 45, 50, 60] { + assert_eq!(ogive.value(point).unwrap(), f(point), "value for {}", point); + } + + // Test next_point method + for step in vec![50, 140, 200] { + for value in vec![10, 20, 30, 35, 45, 50, 60] { + assert_eq!( + ogive.next_point(value, step).unwrap(), + g(f(value) + step as i64).min(60), + "inverse for {} with step {}", + value, + step + ); + } + } + + // Exceeding the range caps it at the maximum point + assert_eq!(ogive.next_point(50, 140).unwrap(), 60); + assert_eq!(ogive.next_point(50, 500).unwrap(), 60); + + // Point to the left of the range should return an error + assert!(ogive.next_point(9, 140).is_err()); + // Point to 
the right of the range gets capped + assert_eq!(ogive.next_point(61, 140).unwrap(), 60); + } + + #[test] + fn single_bin() { + // A histogram with only one bin + let points: Vec = vec![10, 20]; + let ogive = Ogive::from_equi_histogram(points, 700).unwrap(); + + // The function represented by `points` + fn f(x: i64) -> i64 { + 700 * (x - 10) / 10 + } + + // The inverse of `f` + fn g(x: i64) -> i64 { + x * 10 / 700 + 10 + } + + // Check that the ogive is correct + assert_eq!(ogive.bin_size, 700 as f64 / 1 as f64); + assert_eq!(ogive.range, 10..=20); + + // Test value method + for point in vec![10, 15, 20] { + assert_eq!(ogive.value(point).unwrap(), f(point), "value for {}", point); + } + + // Test next_point method + for step in vec![50, 140, 200] { + for value in vec![10, 15, 20] { + assert_eq!( + ogive.next_point(value, step).unwrap(), + g(f(value) + step as i64).min(20), + "inverse for {} with step {}", + value, + step + ); + } + } + + // Exceeding the range caps it at the maximum point + assert_eq!(ogive.next_point(20, 140).unwrap(), 20); + assert_eq!(ogive.next_point(20, 500).unwrap(), 20); + + // Point to the left of the range should return an error + assert!(ogive.next_point(9, 140).is_err()); + // Point to the right of the range gets capped + assert_eq!(ogive.next_point(21, 140).unwrap(), 20); + } + + #[test] + fn one_bin() { + let points: Vec = vec![10]; + let ogive = Ogive::from_equi_histogram(points, 700).unwrap(); + + assert_eq!(ogive.next_point(10, 1).unwrap(), 10); + assert_eq!(ogive.next_point(10, 4).unwrap(), 10); + assert_eq!(ogive.next_point(15, 1).unwrap(), 10); + + assert!(ogive.next_point(9, 1).is_err()); + } + + #[test] + fn exponential() { + let points: Vec = vec![32, 48, 56, 60, 62, 64]; + let ogive = Ogive::from_equi_histogram(points, 100).unwrap(); + + assert_eq!(ogive.value(50).unwrap(), 25); + assert_eq!(ogive.value(56).unwrap(), 40); + assert_eq!(ogive.value(58).unwrap(), 50); + assert_eq!(ogive.value(63).unwrap(), 90); + + 
assert_eq!(ogive.next_point(32, 40).unwrap(), 56); + assert_eq!(ogive.next_point(50, 10).unwrap(), 54); + assert_eq!(ogive.next_point(50, 50).unwrap(), 61); + assert_eq!(ogive.next_point(40, 40).unwrap(), 58); + } +} diff --git a/graph/tests/subgraph_datasource_tests.rs b/graph/tests/subgraph_datasource_tests.rs new file mode 100644 index 00000000000..2c357bf37cd --- /dev/null +++ b/graph/tests/subgraph_datasource_tests.rs @@ -0,0 +1,264 @@ +use std::{collections::BTreeMap, ops::Range, sync::Arc}; + +use graph::{ + blockchain::{ + block_stream::{ + EntityOperationKind, EntitySourceOperation, SubgraphTriggerScanRange, + TriggersAdapterWrapper, + }, + mock::MockTriggersAdapter, + Block, SubgraphFilter, Trigger, + }, + components::store::SourceableStore, + data_source::CausalityRegion, + prelude::{BlockHash, BlockNumber, BlockPtr, DeploymentHash, StoreError, Value}, + schema::{EntityType, InputSchema}, +}; +use slog::Logger; +use tonic::async_trait; + +pub struct MockSourcableStore { + entities: BTreeMap>, + schema: InputSchema, + block_ptr: Option, +} + +impl MockSourcableStore { + pub fn new( + entities: BTreeMap>, + schema: InputSchema, + block_ptr: Option, + ) -> Self { + Self { + entities, + schema, + block_ptr, + } + } + + pub fn set_block_ptr(&mut self, ptr: BlockPtr) { + self.block_ptr = Some(ptr); + } + + pub fn clear_block_ptr(&mut self) { + self.block_ptr = None; + } + + pub fn increment_block(&mut self) -> Result<(), &'static str> { + if let Some(ptr) = &self.block_ptr { + let new_number = ptr.number + 1; + self.block_ptr = Some(BlockPtr::new(ptr.hash.clone(), new_number)); + Ok(()) + } else { + Err("No block pointer set") + } + } + + pub fn decrement_block(&mut self) -> Result<(), &'static str> { + if let Some(ptr) = &self.block_ptr { + if ptr.number == 0 { + return Err("Block number already at 0"); + } + let new_number = ptr.number - 1; + self.block_ptr = Some(BlockPtr::new(ptr.hash.clone(), new_number)); + Ok(()) + } else { + Err("No block pointer set") 
+ } + } +} + +#[async_trait] +impl SourceableStore for MockSourcableStore { + fn get_range( + &self, + entity_types: Vec, + _causality_region: CausalityRegion, + block_range: Range, + ) -> Result>, StoreError> { + Ok(self + .entities + .range(block_range) + .map(|(block_num, operations)| { + let filtered_ops: Vec = operations + .iter() + .filter(|op| entity_types.contains(&op.entity_type)) + .cloned() + .collect(); + (*block_num, filtered_ops) + }) + .filter(|(_, ops)| !ops.is_empty()) + .collect()) + } + + fn input_schema(&self) -> InputSchema { + self.schema.clone() + } + + async fn block_ptr(&self) -> Result, StoreError> { + Ok(self.block_ptr.clone()) + } +} + +#[tokio::test] +async fn test_triggers_adapter_with_entities() { + let id = DeploymentHash::new("test_deployment").unwrap(); + let schema = InputSchema::parse_latest( + r#" + type User @entity { + id: String! + name: String! + age: Int + } + type Post @entity { + id: String! + title: String! + author: String! + } + "#, + id.clone(), + ) + .unwrap(); + + let user1 = schema + .make_entity(vec![ + ("id".into(), Value::String("user1".to_owned())), + ("name".into(), Value::String("Alice".to_owned())), + ("age".into(), Value::Int(30)), + ]) + .unwrap(); + + let user2 = schema + .make_entity(vec![ + ("id".into(), Value::String("user2".to_owned())), + ("name".into(), Value::String("Bob".to_owned())), + ("age".into(), Value::Int(25)), + ]) + .unwrap(); + + let post = schema + .make_entity(vec![ + ("id".into(), Value::String("post1".to_owned())), + ("title".into(), Value::String("Test Post".to_owned())), + ("author".into(), Value::String("user1".to_owned())), + ]) + .unwrap(); + + let user_type = schema.entity_type("User").unwrap(); + let post_type = schema.entity_type("Post").unwrap(); + + let entity1 = EntitySourceOperation { + entity_type: user_type.clone(), + entity: user1, + entity_op: EntityOperationKind::Create, + vid: 1, + }; + + let entity2 = EntitySourceOperation { + entity_type: user_type, + entity: 
user2, + entity_op: EntityOperationKind::Create, + vid: 2, + }; + + let post_entity = EntitySourceOperation { + entity_type: post_type, + entity: post, + entity_op: EntityOperationKind::Create, + vid: 3, + }; + + let mut entities = BTreeMap::new(); + entities.insert(1, vec![entity1, post_entity]); // Block 1 has both User and Post + entities.insert(2, vec![entity2]); // Block 2 has only User + + // Create block hash and store + let hash_bytes: [u8; 32] = [0u8; 32]; + let block_hash = BlockHash(hash_bytes.to_vec().into_boxed_slice()); + let initial_block = BlockPtr::new(block_hash, 0); + let store = Arc::new(MockSourcableStore::new( + entities, + schema.clone(), + Some(initial_block), + )); + + let adapter = Arc::new(MockTriggersAdapter {}); + let wrapper = TriggersAdapterWrapper::new(adapter, vec![store]); + + // Filter only for User entities + let filter = SubgraphFilter { + subgraph: id, + start_block: 0, + entities: vec!["User".to_string()], // Only monitoring User entities + manifest_idx: 0, + }; + + let logger = Logger::root(slog::Discard, slog::o!()); + let result = wrapper + .blocks_with_subgraph_triggers(&logger, &[filter], SubgraphTriggerScanRange::Range(1, 3)) + .await; + + assert!(result.is_ok(), "Failed to get triggers: {:?}", result.err()); + let blocks = result.unwrap(); + + assert_eq!( + blocks.len(), + 3, + "Should have found blocks with entities plus the last block" + ); + + let block1 = &blocks[0]; + assert_eq!(block1.block.number(), 1, "First block should be number 1"); + let triggers1 = &block1.trigger_data; + assert_eq!( + triggers1.len(), + 1, + "Block 1 should have exactly one trigger (User, not Post)" + ); + + if let Trigger::Subgraph(trigger_data) = &triggers1[0] { + assert_eq!( + trigger_data.entity.entity_type.as_str(), + "User", + "Trigger should be for User entity" + ); + assert_eq!( + trigger_data.entity.vid, 1, + "Should be the first User entity" + ); + } else { + panic!("Expected subgraph trigger"); + } + + let block2 = &blocks[1]; + 
assert_eq!(block2.block.number(), 2, "Second block should be number 2"); + let triggers2 = &block2.trigger_data; + assert_eq!( + triggers2.len(), + 1, + "Block 2 should have exactly one trigger" + ); + + if let Trigger::Subgraph(trigger_data) = &triggers2[0] { + assert_eq!( + trigger_data.entity.entity_type.as_str(), + "User", + "Trigger should be for User entity" + ); + assert_eq!( + trigger_data.entity.vid, 2, + "Should be the second User entity" + ); + } else { + panic!("Expected subgraph trigger"); + } + + let block3 = &blocks[2]; + assert_eq!(block3.block.number(), 3, "Third block should be number 3"); + let triggers3 = &block3.trigger_data; + assert_eq!( + triggers3.len(), + 0, + "Block 3 should have no triggers but be included as it's the last block" + ); +} diff --git a/node/src/bin/manager.rs b/node/src/bin/manager.rs index 54752a3d25f..902241d3d54 100644 --- a/node/src/bin/manager.rs +++ b/node/src/bin/manager.rs @@ -12,7 +12,7 @@ use graph::prelude::{MetricsRegistry, BLOCK_NUMBER_MAX}; use graph::{data::graphql::load_manager::LoadManager, prelude::chrono, prometheus::Registry}; use graph::{ prelude::{ - anyhow::{self, Context as AnyhowContextTrait}, + anyhow::{self, anyhow, Context as AnyhowContextTrait}, info, tokio, Logger, NodeId, }, url::Url, @@ -143,6 +143,12 @@ pub enum Command { /// List only used (current and pending) versions #[clap(long, short)] used: bool, + /// List names only for the active deployment + #[clap(long, short)] + brief: bool, + /// Do not print subgraph names + #[clap(long, short = 'N')] + no_name: bool, }, /// Manage unused deployments /// @@ -1127,6 +1133,8 @@ async fn main() -> anyhow::Result<()> { status, used, all, + brief, + no_name, } => { let (store, primary_pool) = ctx.store_and_primary(); @@ -1142,6 +1150,8 @@ async fn main() -> anyhow::Result<()> { status, used, all, + brief, + no_name, }; commands::deployment::info::run(ctx, args) @@ -1198,12 +1208,22 @@ async fn main() -> anyhow::Result<()> { Remove { name } => 
commands::remove::run(ctx.subgraph_store(), &name), Create { name } => commands::create::run(ctx.subgraph_store(), name), Unassign { deployment } => { - let sender = ctx.notification_sender(); - commands::assign::unassign(ctx.primary_pool(), &sender, &deployment).await + let notifications_sender = ctx.notification_sender(); + let primary_pool = ctx.primary_pool(); + let deployment = make_deployment_selector(deployment); + commands::deployment::unassign::run(primary_pool, notifications_sender, deployment) } Reassign { deployment, node } => { - let sender = ctx.notification_sender(); - commands::assign::reassign(ctx.primary_pool(), &sender, &deployment, node) + let notifications_sender = ctx.notification_sender(); + let primary_pool = ctx.primary_pool(); + let deployment = make_deployment_selector(deployment); + let node = NodeId::new(node).map_err(|node| anyhow!("invalid node id {:?}", node))?; + commands::deployment::reassign::run( + primary_pool, + notifications_sender, + deployment, + &node, + ) } Pause { deployment } => { let notifications_sender = ctx.notification_sender(); diff --git a/node/src/chain.rs b/node/src/chain.rs index 1c62bf2248e..289c0580c2e 100644 --- a/node/src/chain.rs +++ b/node/src/chain.rs @@ -433,6 +433,7 @@ pub async fn networks_as_chains( }; let client = Arc::new(cc); + let eth_adapters = Arc::new(eth_adapters); let adapter_selector = EthereumAdapterSelector::new( logger_factory.clone(), client.clone(), @@ -455,7 +456,7 @@ pub async fn networks_as_chains( Arc::new(EthereumBlockRefetcher {}), Arc::new(adapter_selector), Arc::new(EthereumRuntimeAdapterBuilder {}), - Arc::new(eth_adapters.clone()), + eth_adapters, ENV_VARS.reorg_threshold, polling_interval, true, diff --git a/node/src/manager/commands/deployment/info.rs b/node/src/manager/commands/deployment/info.rs index 1b4f646a212..417092d6e2d 100644 --- a/node/src/manager/commands/deployment/info.rs +++ b/node/src/manager/commands/deployment/info.rs @@ -1,4 +1,6 @@ +use 
std::collections::BTreeMap; use std::collections::HashMap; +use std::io; use std::sync::Arc; use anyhow::bail; @@ -12,7 +14,8 @@ use graphman::deployment::Deployment; use graphman::deployment::DeploymentSelector; use graphman::deployment::DeploymentVersionSelector; -use crate::manager::display::List; +use crate::manager::display::Columns; +use crate::manager::display::Row; pub struct Context { pub primary_pool: ConnectionPool, @@ -26,6 +29,8 @@ pub struct Args { pub status: bool, pub used: bool, pub all: bool, + pub brief: bool, + pub no_name: bool, } pub fn run(ctx: Context, args: Args) -> Result<()> { @@ -41,6 +46,8 @@ pub fn run(ctx: Context, args: Args) -> Result<()> { status, used, all, + brief, + no_name, } = args; let deployment = match deployment { @@ -65,8 +72,7 @@ pub fn run(ctx: Context, args: Args) -> Result<()> { None }; - print_info(deployments, statuses); - + render(brief, no_name, deployments, statuses); Ok(()) } @@ -85,77 +91,86 @@ fn make_deployment_version_selector( } } -fn print_info(deployments: Vec, statuses: Option>) { - let mut headers = vec![ - "Name", - "Status", - "Hash", - "Namespace", - "Shard", - "Active", - "Chain", - "Node ID", - ]; - - if statuses.is_some() { - headers.extend(vec![ - "Paused", - "Synced", - "Health", - "Earliest Block", - "Latest Block", - "Chain Head Block", - ]); - } +const NONE: &str = "---"; - let mut list = List::new(headers); +fn optional(s: Option) -> String { + s.map(|x| x.to_string()).unwrap_or(NONE.to_owned()) +} - const NONE: &str = "---"; +fn render( + brief: bool, + no_name: bool, + deployments: Vec, + statuses: Option>, +) { + fn name_and_status(deployment: &Deployment) -> String { + format!("{} ({})", deployment.name, deployment.version_status) + } - fn optional(s: Option) -> String { - s.map(|x| x.to_string()).unwrap_or(NONE.to_owned()) + fn number(n: Option) -> String { + n.map(|x| format!("{x}")).unwrap_or(NONE.to_owned()) } + let mut table = Columns::default(); + + let mut combined: BTreeMap<_, 
Vec<_>> = BTreeMap::new(); for deployment in deployments { - let mut row = vec![ - deployment.name, - deployment.version_status, - deployment.hash, - deployment.namespace, - deployment.shard, - deployment.is_active.to_string(), - deployment.chain, - optional(deployment.node_id), - ]; - - let status = statuses.as_ref().map(|x| x.get(&deployment.id)); - - match status { - Some(Some(status)) => { - row.extend(vec![ - optional(status.is_paused), - status.is_synced.to_string(), - status.health.as_str().to_string(), - status.earliest_block_number.to_string(), - optional(status.latest_block.as_ref().map(|x| x.number)), - optional(status.chain_head_block.as_ref().map(|x| x.number)), - ]); + let status = statuses.as_ref().and_then(|x| x.get(&deployment.id)); + combined + .entry(deployment.id) + .or_default() + .push((deployment, status)); + } + + let mut first = true; + for (_, deployments) in combined { + let deployment = &deployments[0].0; + if first { + first = false; + } else { + table.push_row(Row::separator()); + } + table.push_row([ + "Namespace", + &format!("{} [{}]", deployment.namespace, deployment.shard), + ]); + table.push_row(["Hash", &deployment.hash]); + if !no_name && (!brief || deployment.is_active) { + if deployments.len() > 1 { + table.push_row(["Versions", &name_and_status(deployment)]); + for (d, _) in &deployments[1..] 
{ + table.push_row(["", &name_and_status(d)]); + } + } else { + table.push_row(["Version", &name_and_status(deployment)]); } - Some(None) => { - row.extend(vec![ - NONE.to_owned(), - NONE.to_owned(), - NONE.to_owned(), - NONE.to_owned(), - NONE.to_owned(), - NONE.to_owned(), - ]); + table.push_row(["Chain", &deployment.chain]); + } + table.push_row(["Node ID", &optional(deployment.node_id.as_ref())]); + table.push_row(["Active", &deployment.is_active.to_string()]); + if let Some((_, status)) = deployments.get(0) { + if let Some(status) = status { + table.push_row(["Paused", &optional(status.is_paused)]); + table.push_row(["Synced", &status.is_synced.to_string()]); + table.push_row(["Health", status.health.as_str()]); + + let earliest = status.earliest_block_number; + let latest = status.latest_block.as_ref().map(|x| x.number); + let chain_head = status.chain_head_block.as_ref().map(|x| x.number); + let behind = match (latest, chain_head) { + (Some(latest), Some(chain_head)) => Some(chain_head - latest), + _ => None, + }; + + table.push_row(["Earliest Block", &earliest.to_string()]); + table.push_row(["Latest Block", &number(latest)]); + table.push_row(["Chain Head Block", &number(chain_head)]); + if let Some(behind) = behind { + table.push_row([" Blocks behind", &behind.to_string()]); + } } - None => {} } - - list.append(row); } - list.render(); + table.render(&mut io::stdout()).ok(); } diff --git a/node/src/manager/commands/deployment/mod.rs b/node/src/manager/commands/deployment/mod.rs index 98910d7b4c4..8fd0237d3a7 100644 --- a/node/src/manager/commands/deployment/mod.rs +++ b/node/src/manager/commands/deployment/mod.rs @@ -1,4 +1,6 @@ pub mod info; pub mod pause; +pub mod reassign; pub mod restart; pub mod resume; +pub mod unassign; diff --git a/node/src/manager/commands/deployment/pause.rs b/node/src/manager/commands/deployment/pause.rs index 1cd4808fad8..2a690ea688a 100644 --- a/node/src/manager/commands/deployment/pause.rs +++ 
b/node/src/manager/commands/deployment/pause.rs @@ -3,8 +3,9 @@ use std::sync::Arc; use anyhow::Result; use graph_store_postgres::connection_pool::ConnectionPool; use graph_store_postgres::NotificationSender; -use graphman::commands::deployment::pause::load_active_deployment; -use graphman::commands::deployment::pause::pause_active_deployment; +use graphman::commands::deployment::pause::{ + load_active_deployment, pause_active_deployment, PauseDeploymentError, +}; use graphman::deployment::DeploymentSelector; pub fn run( @@ -12,11 +13,22 @@ pub fn run( notification_sender: Arc, deployment: DeploymentSelector, ) -> Result<()> { - let active_deployment = load_active_deployment(primary_pool.clone(), &deployment)?; + let active_deployment = load_active_deployment(primary_pool.clone(), &deployment); - println!("Pausing deployment {} ...", active_deployment.locator()); - - pause_active_deployment(primary_pool, notification_sender, active_deployment)?; + match active_deployment { + Ok(active_deployment) => { + println!("Pausing deployment {} ...", active_deployment.locator()); + pause_active_deployment(primary_pool, notification_sender, active_deployment)?; + } + Err(PauseDeploymentError::AlreadyPaused(locator)) => { + println!("Deployment {} is already paused", locator); + return Ok(()); + } + Err(PauseDeploymentError::Common(e)) => { + println!("Failed to load active deployment: {}", e); + return Err(e.into()); + } + } Ok(()) } diff --git a/node/src/manager/commands/deployment/reassign.rs b/node/src/manager/commands/deployment/reassign.rs new file mode 100644 index 00000000000..60528f16206 --- /dev/null +++ b/node/src/manager/commands/deployment/reassign.rs @@ -0,0 +1,41 @@ +use std::sync::Arc; + +use anyhow::Result; +use graph::prelude::NodeId; +use graph_store_postgres::connection_pool::ConnectionPool; +use graph_store_postgres::NotificationSender; +use graphman::commands::deployment::reassign::{ + load_deployment, reassign_deployment, ReassignResult, +}; +use 
graphman::deployment::DeploymentSelector; + +pub fn run( + primary_pool: ConnectionPool, + notification_sender: Arc, + deployment: DeploymentSelector, + node: &NodeId, +) -> Result<()> { + let deployment = load_deployment(primary_pool.clone(), &deployment)?; + + println!("Reassigning deployment {}", deployment.locator()); + + let reassign_result = + reassign_deployment(primary_pool, notification_sender, &deployment, node)?; + + match reassign_result { + ReassignResult::EmptyResponse => { + println!( + "Deployment {} assigned to node {}", + deployment.locator(), + node + ); + } + ReassignResult::CompletedWithWarnings(warnings) => { + for msg in warnings { + println!("{}", msg); + } + } + } + + Ok(()) +} diff --git a/node/src/manager/commands/deployment/unassign.rs b/node/src/manager/commands/deployment/unassign.rs new file mode 100644 index 00000000000..45567e81f63 --- /dev/null +++ b/node/src/manager/commands/deployment/unassign.rs @@ -0,0 +1,22 @@ +use std::sync::Arc; + +use anyhow::Result; +use graph_store_postgres::connection_pool::ConnectionPool; +use graph_store_postgres::NotificationSender; +use graphman::commands::deployment::unassign::load_assigned_deployment; +use graphman::commands::deployment::unassign::unassign_deployment; +use graphman::deployment::DeploymentSelector; + +pub fn run( + primary_pool: ConnectionPool, + notification_sender: Arc, + deployment: DeploymentSelector, +) -> Result<()> { + let assigned_deployment = load_assigned_deployment(primary_pool.clone(), &deployment)?; + + println!("Unassigning deployment {}", assigned_deployment.locator()); + + unassign_deployment(primary_pool, notification_sender, assigned_deployment)?; + + Ok(()) +} diff --git a/node/src/manager/display.rs b/node/src/manager/display.rs index 694eaf629bf..7d27b8269cb 100644 --- a/node/src/manager/display.rs +++ b/node/src/manager/display.rs @@ -1,3 +1,7 @@ +use std::io::{self, Write}; + +const LINE_WIDTH: usize = 78; + pub struct List { pub headers: Vec, pub rows: Vec>, 
@@ -29,8 +33,6 @@ impl List { } pub fn render(&self) { - const LINE_WIDTH: usize = 78; - let header_width = self.headers.iter().map(|h| h.len()).max().unwrap_or(0); let header_width = if header_width < 5 { 5 } else { header_width }; let mut first = true; @@ -52,3 +54,97 @@ impl List { } } } + +/// A more general list of columns than `List`. In practical terms, this is +/// a very simple table with two columns, where both columns are +/// left-aligned +pub struct Columns { + widths: Vec, + rows: Vec, +} + +impl Columns { + pub fn push_row>(&mut self, row: R) { + let row = row.into(); + for (idx, width) in row.widths().iter().enumerate() { + if idx >= self.widths.len() { + self.widths.push(*width); + } else { + self.widths[idx] = (*width).max(self.widths[idx]); + } + } + self.rows.push(row); + } + + pub fn render(&self, out: &mut dyn Write) -> io::Result<()> { + for row in &self.rows { + row.render(out, &self.widths)?; + } + Ok(()) + } +} + +impl Default for Columns { + fn default() -> Self { + Self { + widths: Vec::new(), + rows: Vec::new(), + } + } +} + +pub enum Row { + Cells(Vec), + Separator, +} + +impl Row { + pub fn separator() -> Self { + Self::Separator + } + + fn widths(&self) -> Vec { + match self { + Row::Cells(cells) => cells.iter().map(|cell| cell.len()).collect(), + Row::Separator => vec![], + } + } + + fn render(&self, out: &mut dyn Write, widths: &[usize]) -> io::Result<()> { + match self { + Row::Cells(cells) => { + for (idx, cell) in cells.iter().enumerate() { + if idx > 0 { + write!(out, " | ")?; + } + write!(out, "{cell:width$}", width = widths[idx])?; + } + } + Row::Separator => { + let total_width = widths.iter().sum::(); + let extra_width = if total_width >= LINE_WIDTH { + 0 + } else { + LINE_WIDTH - total_width + }; + for (idx, width) in widths.iter().enumerate() { + if idx > 0 { + write!(out, "-+-")?; + } + if idx == widths.len() - 1 { + write!(out, "{:- for Row { + fn from(row: [&str; 2]) -> Self { + Self::Cells(row.iter().map(|s| 
s.to_string()).collect()) + } +} diff --git a/node/src/store_builder.rs b/node/src/store_builder.rs index 2a39d0ea6ed..7fadf6b92c2 100644 --- a/node/src/store_builder.rs +++ b/node/src/store_builder.rs @@ -191,7 +191,7 @@ impl StoreBuilder { ); block_store .update_db_version() - .expect("Updating `db_version` works"); + .expect("Updating `db_version` should work"); Arc::new(DieselStore::new(subgraph_store, block_store)) } diff --git a/runtime/test/src/common.rs b/runtime/test/src/common.rs index 25e01776629..7641dd06d8b 100644 --- a/runtime/test/src/common.rs +++ b/runtime/test/src/common.rs @@ -3,14 +3,13 @@ use graph::blockchain::BlockTime; use graph::components::store::DeploymentLocator; use graph::data::subgraph::*; use graph::data_source; +use graph::data_source::common::MappingABI; use graph::env::EnvVars; use graph::ipfs::IpfsRpcClient; use graph::ipfs::ServerAddress; use graph::log; use graph::prelude::*; -use graph_chain_ethereum::{ - Chain, DataSource, DataSourceTemplate, Mapping, MappingABI, TemplateSource, -}; +use graph_chain_ethereum::{Chain, DataSource, DataSourceTemplate, Mapping, TemplateSource}; use graph_runtime_wasm::host_exports::DataSourceDetails; use graph_runtime_wasm::{HostExports, MappingContext}; use semver::Version; diff --git a/runtime/test/src/test.rs b/runtime/test/src/test.rs index 561820760d4..53a84aec5f1 100644 --- a/runtime/test/src/test.rs +++ b/runtime/test/src/test.rs @@ -477,13 +477,13 @@ async fn test_ipfs_block() { // The user_data value we use with calls to ipfs_map const USER_DATA: &str = "user_data"; -fn make_thing(id: &str, value: &str) -> (String, EntityModification) { +fn make_thing(id: &str, value: &str, vid: i64) -> (String, EntityModification) { const DOCUMENT: &str = " type Thing @entity { id: String!, value: String!, extra: String }"; lazy_static! 
{ static ref SCHEMA: InputSchema = InputSchema::raw(DOCUMENT, "doesntmatter"); static ref THING_TYPE: EntityType = SCHEMA.entity_type("Thing").unwrap(); } - let data = entity! { SCHEMA => id: id, value: value, extra: USER_DATA }; + let data = entity! { SCHEMA => id: id, value: value, extra: USER_DATA, vid: vid }; let key = THING_TYPE.parse_key(id).unwrap(); ( format!("{{ \"id\": \"{}\", \"value\": \"{}\"}}", id, value), @@ -553,8 +553,8 @@ async fn test_ipfs_map(api_version: Version, json_error_msg: &str) { let subgraph_id = "ipfsMap"; // Try it with two valid objects - let (str1, thing1) = make_thing("one", "eins"); - let (str2, thing2) = make_thing("two", "zwei"); + let (str1, thing1) = make_thing("one", "eins", 100); + let (str2, thing2) = make_thing("two", "zwei", 100); let ops = run_ipfs_map( subgraph_id, format!("{}\n{}", str1, str2), @@ -1001,8 +1001,8 @@ async fn test_entity_store(api_version: Version) { let schema = store.input_schema(&deployment.hash).unwrap(); - let alex = entity! { schema => id: "alex", name: "Alex" }; - let steve = entity! { schema => id: "steve", name: "Steve" }; + let alex = entity! { schema => id: "alex", name: "Alex", vid: 0i64 }; + let steve = entity! { schema => id: "steve", name: "Steve", vid: 1i64 }; let user_type = schema.entity_type("User").unwrap(); test_store::insert_entities( &deployment, @@ -1698,3 +1698,71 @@ async fn test_store_ts() { "Cannot get entity of type `Stats`. 
The type must be an @entity type", ); } + +async fn test_yaml_parsing(api_version: Version, gas_used: u64) { + let mut module = test_module( + "yamlParsing", + mock_data_source( + &wasm_file_path("yaml_parsing.wasm", api_version.clone()), + api_version.clone(), + ), + api_version, + ) + .await; + + let mut test = |input: &str, expected: &str| { + let ptr: AscPtr = module.invoke_export1("handleYaml", input.as_bytes()); + let resp: String = module.asc_get(ptr).unwrap(); + assert_eq!(resp, expected, "failed on input: {input}"); + }; + + // Test invalid YAML; + test("{a: 1, - b: 2}", "error"); + + // Test size limit; + test(&"x".repeat(10_000_0001), "error"); + + // Test nulls; + test("null", "(0) null"); + + // Test booleans; + test("false", "(1) false"); + test("true", "(1) true"); + + // Test numbers; + test("12345", "(2) 12345"); + test("12345.6789", "(2) 12345.6789"); + + // Test strings; + test("aa bb cc", "(3) aa bb cc"); + test("\"aa bb cc\"", "(3) aa bb cc"); + + // Test arrays; + test("[1, 2, 3, 4]", "(4) [(2) 1, (2) 2, (2) 3, (2) 4]"); + test("- 1\n- 2\n- 3\n- 4", "(4) [(2) 1, (2) 2, (2) 3, (2) 4]"); + + // Test objects; + test("{a: 1, b: 2, c: 3}", "(5) {a: (2) 1, b: (2) 2, c: (2) 3}"); + test("a: 1\nb: 2\nc: 3", "(5) {a: (2) 1, b: (2) 2, c: (2) 3}"); + + // Test tagged values; + test("!AA bb cc", "(6) !AA (3) bb cc"); + + // Test nesting; + test( + "aa:\n bb:\n - cc: !DD ee", + "(5) {aa: (5) {bb: (4) [(5) {cc: (6) !DD (3) ee}]}}", + ); + + assert_eq!(module.gas_used(), gas_used, "gas used"); +} + +#[tokio::test] +async fn yaml_parsing_v0_0_4() { + test_yaml_parsing(API_VERSION_0_0_4, 10462217077171).await; +} + +#[tokio::test] +async fn yaml_parsing_v0_0_5() { + test_yaml_parsing(API_VERSION_0_0_5, 10462245390665).await; +} diff --git a/runtime/test/wasm_test/api_version_0_0_4/yaml_parsing.ts b/runtime/test/wasm_test/api_version_0_0_4/yaml_parsing.ts new file mode 100644 index 00000000000..b3efc9ba205 --- /dev/null +++ 
b/runtime/test/wasm_test/api_version_0_0_4/yaml_parsing.ts @@ -0,0 +1,20 @@ +import "allocator/arena"; + +import {Bytes, Result} from "../api_version_0_0_5/common/types"; +import {debug, YAMLValue} from "../api_version_0_0_5/common/yaml"; + +export {memory}; + +declare namespace yaml { + function try_fromBytes(data: Bytes): Result; +} + +export function handleYaml(data: Bytes): string { + let result = yaml.try_fromBytes(data); + + if (result.isError) { + return "error"; + } + + return debug(result.value); +} diff --git a/runtime/test/wasm_test/api_version_0_0_4/yaml_parsing.wasm b/runtime/test/wasm_test/api_version_0_0_4/yaml_parsing.wasm new file mode 100644 index 00000000000..cb132344ce3 Binary files /dev/null and b/runtime/test/wasm_test/api_version_0_0_4/yaml_parsing.wasm differ diff --git a/runtime/test/wasm_test/api_version_0_0_5/common/yaml.ts b/runtime/test/wasm_test/api_version_0_0_5/common/yaml.ts new file mode 100644 index 00000000000..135635475f1 --- /dev/null +++ b/runtime/test/wasm_test/api_version_0_0_5/common/yaml.ts @@ -0,0 +1,139 @@ +import {TypedMap} from './types'; + +export enum YAMLValueKind { + NULL = 0, + BOOL = 1, + NUMBER = 2, + STRING = 3, + ARRAY = 4, + OBJECT = 5, + TAGGED = 6, +} + +export class YAMLValue { + kind: YAMLValueKind; + data: u64; + + isBool(): boolean { + return this.kind == YAMLValueKind.BOOL; + } + + isNumber(): boolean { + return this.kind == YAMLValueKind.NUMBER; + } + + isString(): boolean { + return this.kind == YAMLValueKind.STRING; + } + + isArray(): boolean { + return this.kind == YAMLValueKind.ARRAY; + } + + isObject(): boolean { + return this.kind == YAMLValueKind.OBJECT; + } + + isTagged(): boolean { + return this.kind == YAMLValueKind.TAGGED; + } + + + toBool(): boolean { + assert(this.isBool(), 'YAML value is not a boolean'); + return this.data != 0; + } + + toNumber(): string { + assert(this.isNumber(), 'YAML value is not a number'); + return changetype(this.data as usize); + } + + toString(): string { + 
assert(this.isString(), 'YAML value is not a string'); + return changetype(this.data as usize); + } + + toArray(): Array { + assert(this.isArray(), 'YAML value is not an array'); + return changetype>(this.data as usize); + } + + toObject(): TypedMap { + assert(this.isObject(), 'YAML value is not an object'); + return changetype>(this.data as usize); + } + + toTagged(): YAMLTaggedValue { + assert(this.isTagged(), 'YAML value is not tagged'); + return changetype(this.data as usize); + } +} + +export class YAMLTaggedValue { + tag: string; + value: YAMLValue; +} + + +export function debug(value: YAMLValue): string { + return "(" + value.kind.toString() + ") " + debug_value(value); +} + +function debug_value(value: YAMLValue): string { + switch (value.kind) { + case YAMLValueKind.NULL: + return "null"; + case YAMLValueKind.BOOL: + return value.toBool() ? "true" : "false"; + case YAMLValueKind.NUMBER: + return value.toNumber(); + case YAMLValueKind.STRING: + return value.toString(); + case YAMLValueKind.ARRAY: { + let arr = value.toArray(); + + let s = "["; + for (let i = 0; i < arr.length; i++) { + if (i > 0) { + s += ", "; + } + s += debug(arr[i]); + } + s += "]"; + + return s; + } + case YAMLValueKind.OBJECT: { + let arr = value.toObject().entries.sort((a, b) => { + if (a.key.toString() < b.key.toString()) { + return -1; + } + + if (a.key.toString() > b.key.toString()) { + return 1; + } + + return 0; + }); + + let s = "{"; + for (let i = 0; i < arr.length; i++) { + if (i > 0) { + s += ", "; + } + s += debug_value(arr[i].key) + ": " + debug(arr[i].value); + } + s += "}"; + + return s; + } + case YAMLValueKind.TAGGED: { + let tagged = value.toTagged(); + + return tagged.tag + " " + debug(tagged.value); + } + default: + return "undefined"; + } +} diff --git a/runtime/test/wasm_test/api_version_0_0_5/yaml_parsing.ts b/runtime/test/wasm_test/api_version_0_0_5/yaml_parsing.ts new file mode 100644 index 00000000000..c89eb611bb2 --- /dev/null +++ 
b/runtime/test/wasm_test/api_version_0_0_5/yaml_parsing.ts @@ -0,0 +1,62 @@ +import {debug, YAMLValue, YAMLTaggedValue} from './common/yaml'; +import {Bytes, Result, TypedMap, TypedMapEntry, Wrapped} from './common/types'; + +enum TypeId { + STRING = 0, + UINT8_ARRAY = 6, + + YamlValue = 5500, + YamlTaggedValue = 5501, + YamlTypedMapEntryValueValue = 5502, + YamlTypedMapValueValue = 5503, + YamlArrayValue = 5504, + YamlArrayTypedMapEntryValueValue = 5505, + YamlWrappedValue = 5506, + YamlResultValueBool = 5507, +} + +export function id_of_type(type_id_index: TypeId): usize { + switch (type_id_index) { + case TypeId.STRING: + return idof(); + case TypeId.UINT8_ARRAY: + return idof(); + + case TypeId.YamlValue: + return idof(); + case TypeId.YamlTaggedValue: + return idof(); + case TypeId.YamlTypedMapEntryValueValue: + return idof>(); + case TypeId.YamlTypedMapValueValue: + return idof>(); + case TypeId.YamlArrayValue: + return idof>(); + case TypeId.YamlArrayTypedMapEntryValueValue: + return idof>>(); + case TypeId.YamlWrappedValue: + return idof>(); + case TypeId.YamlResultValueBool: + return idof>(); + default: + return 0; + } +} + +export function allocate(n: usize): usize { + return __alloc(n); +} + +declare namespace yaml { + function try_fromBytes(data: Bytes): Result; +} + +export function handleYaml(data: Bytes): string { + let result = yaml.try_fromBytes(data); + + if (result.isError) { + return "error"; + } + + return debug(result.value); +} diff --git a/runtime/test/wasm_test/api_version_0_0_5/yaml_parsing.wasm b/runtime/test/wasm_test/api_version_0_0_5/yaml_parsing.wasm new file mode 100644 index 00000000000..131ded5d04c Binary files /dev/null and b/runtime/test/wasm_test/api_version_0_0_5/yaml_parsing.wasm differ diff --git a/runtime/wasm/Cargo.toml b/runtime/wasm/Cargo.toml index 0e6e5d64100..3e74e9f985e 100644 --- a/runtime/wasm/Cargo.toml +++ b/runtime/wasm/Cargo.toml @@ -20,3 +20,5 @@ wasm-instrument = { version = "0.2.0", features = ["std", 
"sign_ext"] } # AssemblyScript uses sign extensions parity-wasm = { version = "0.45", features = ["std", "sign_ext"] } + +serde_yaml = { workspace = true } diff --git a/runtime/wasm/src/asc_abi/class.rs b/runtime/wasm/src/asc_abi/class.rs index 366ff844b08..1fae1ad9ce0 100644 --- a/runtime/wasm/src/asc_abi/class.rs +++ b/runtime/wasm/src/asc_abi/class.rs @@ -398,6 +398,17 @@ impl AscIndexId for Array> { const INDEX_ASC_TYPE_ID: IndexForAscTypeId = IndexForAscTypeId::ArrayBigDecimal; } +impl AscIndexId for Array>> { + const INDEX_ASC_TYPE_ID: IndexForAscTypeId = IndexForAscTypeId::YamlArrayValue; +} + +impl AscIndexId + for Array, AscEnum>>> +{ + const INDEX_ASC_TYPE_ID: IndexForAscTypeId = + IndexForAscTypeId::YamlArrayTypedMapEntryValueValue; +} + /// Represents any `AscValue` since they all fit in 64 bits. #[repr(C)] #[derive(Copy, Clone, Default)] @@ -505,6 +516,10 @@ impl AscIndexId for AscEnum { const INDEX_ASC_TYPE_ID: IndexForAscTypeId = IndexForAscTypeId::JsonValue; } +impl AscIndexId for AscEnum { + const INDEX_ASC_TYPE_ID: IndexForAscTypeId = IndexForAscTypeId::YamlValue; +} + pub type AscEnumArray = AscPtr>>>; #[repr(u32)] @@ -613,6 +628,10 @@ impl AscIndexId for AscTypedMapEntry> { const INDEX_ASC_TYPE_ID: IndexForAscTypeId = IndexForAscTypeId::TypedMapEntryStringJsonValue; } +impl AscIndexId for AscTypedMapEntry, AscEnum> { + const INDEX_ASC_TYPE_ID: IndexForAscTypeId = IndexForAscTypeId::YamlTypedMapEntryValueValue; +} + pub(crate) type AscTypedMapEntryArray = Array>>; #[repr(C)] @@ -638,6 +657,10 @@ impl AscIndexId for AscTypedMap, AscEnum> { + const INDEX_ASC_TYPE_ID: IndexForAscTypeId = IndexForAscTypeId::YamlTypedMapValueValue; +} + pub type AscEntity = AscTypedMap>; pub(crate) type AscJson = AscTypedMap>; @@ -725,6 +748,10 @@ impl AscIndexId for AscResult>, bool> { const INDEX_ASC_TYPE_ID: IndexForAscTypeId = IndexForAscTypeId::ResultJsonValueBool; } +impl AscIndexId for AscResult>, bool> { + const INDEX_ASC_TYPE_ID: IndexForAscTypeId = 
IndexForAscTypeId::YamlResultValueBool; +} + #[repr(C)] #[derive(AscType, Copy, Clone)] pub struct AscWrapped { @@ -742,3 +769,54 @@ impl AscIndexId for AscWrapped { impl AscIndexId for AscWrapped>> { const INDEX_ASC_TYPE_ID: IndexForAscTypeId = IndexForAscTypeId::WrappedJsonValue; } + +impl AscIndexId for AscWrapped>> { + const INDEX_ASC_TYPE_ID: IndexForAscTypeId = IndexForAscTypeId::YamlWrappedValue; +} + +#[repr(u32)] +#[derive(AscType, Clone, Copy)] +pub enum YamlValueKind { + Null, + Bool, + Number, + String, + Array, + Object, + Tagged, +} + +impl Default for YamlValueKind { + fn default() -> Self { + YamlValueKind::Null + } +} + +impl AscValue for YamlValueKind {} + +impl YamlValueKind { + pub(crate) fn get_kind(value: &serde_yaml::Value) -> Self { + use serde_yaml::Value; + + match value { + Value::Null => Self::Null, + Value::Bool(_) => Self::Bool, + Value::Number(_) => Self::Number, + Value::String(_) => Self::String, + Value::Sequence(_) => Self::Array, + Value::Mapping(_) => Self::Object, + Value::Tagged(_) => Self::Tagged, + } + } +} + +#[repr(C)] +#[derive(AscType)] +pub struct AscYamlTaggedValue { + pub tag: AscPtr, + pub value: AscPtr>, +} + +impl AscIndexId for AscYamlTaggedValue { + const INDEX_ASC_TYPE_ID: IndexForAscTypeId = IndexForAscTypeId::YamlTaggedValue; +} diff --git a/runtime/wasm/src/host.rs b/runtime/wasm/src/host.rs index 3ecee7ba753..bc5610a63d0 100644 --- a/runtime/wasm/src/host.rs +++ b/runtime/wasm/src/host.rs @@ -142,11 +142,7 @@ where ens_lookup, )); - let host_fns = data_source - .as_onchain() - .map(|ds| runtime_adapter.host_fns(ds)) - .transpose()? 
- .unwrap_or_default(); + let host_fns = runtime_adapter.host_fns(&data_source).unwrap_or_default(); Ok(RuntimeHost { host_fns: Arc::new(host_fns), @@ -366,6 +362,7 @@ impl RuntimeHostTrait for RuntimeHost { match self.data_source() { DataSource::Onchain(_) => None, DataSource::Offchain(ds) => ds.done_at(), + DataSource::Subgraph(_) => None, } } @@ -373,6 +370,7 @@ impl RuntimeHostTrait for RuntimeHost { match self.data_source() { DataSource::Onchain(_) => {} DataSource::Offchain(ds) => ds.set_done_at(block), + DataSource::Subgraph(_) => {} } } diff --git a/runtime/wasm/src/host_exports.rs b/runtime/wasm/src/host_exports.rs index 78921bbcf34..bd1c8706c4a 100644 --- a/runtime/wasm/src/host_exports.rs +++ b/runtime/wasm/src/host_exports.rs @@ -350,9 +350,12 @@ impl HostExports { state.metrics.track_entity_write(&entity_type, &entity); - state - .entity_cache - .set(key, entity, Some(&mut state.write_capacity_remaining))?; + state.entity_cache.set( + key, + entity, + block, + Some(&mut state.write_capacity_remaining), + )?; Ok(()) } @@ -1233,6 +1236,36 @@ impl HostExports { .map(|mut tokens| tokens.pop().unwrap()) .context("Failed to decode") } + + pub(crate) fn yaml_from_bytes( + &self, + bytes: &[u8], + gas: &GasCounter, + state: &mut BlockState, + ) -> Result { + const YAML_MAX_SIZE_BYTES: usize = 10_000_000; + + Self::track_gas_and_ops( + gas, + state, + gas::YAML_FROM_BYTES.with_args(complexity::Size, bytes), + "yaml_from_bytes", + )?; + + if bytes.len() > YAML_MAX_SIZE_BYTES { + return Err(DeterministicHostError::Other( + anyhow!( + "YAML size exceeds max size of {} bytes", + YAML_MAX_SIZE_BYTES + ) + .into(), + )); + } + + serde_yaml::from_slice(bytes) + .context("failed to parse YAML from bytes") + .map_err(DeterministicHostError::from) + } } fn string_to_h160(string: &str) -> Result { diff --git a/runtime/wasm/src/module/context.rs b/runtime/wasm/src/module/context.rs index ddf8eba3f1d..03cbf244c23 100644 --- a/runtime/wasm/src/module/context.rs +++ 
b/runtime/wasm/src/module/context.rs @@ -1188,4 +1188,64 @@ impl WasmInstanceContext<'_> { "`box.profile` has been removed." ))) } + + /// function yaml.fromBytes(bytes: Bytes): YAMLValue + pub fn yaml_from_bytes( + &mut self, + gas: &GasCounter, + bytes_ptr: AscPtr, + ) -> Result>, HostExportError> { + let bytes: Vec = asc_get(self, bytes_ptr, gas)?; + let host_exports = self.as_ref().ctx.host_exports.cheap_clone(); + let ctx = &mut self.as_mut().ctx; + + let yaml_value = host_exports + .yaml_from_bytes(&bytes, gas, &mut ctx.state) + .inspect_err(|_| { + debug!( + &self.as_ref().ctx.logger, + "Failed to parse YAML from byte array"; + "bytes" => truncate_yaml_bytes_for_logging(&bytes), + ); + })?; + + asc_new(self, &yaml_value, gas) + } + + /// function yaml.try_fromBytes(bytes: Bytes): Result + pub fn yaml_try_from_bytes( + &mut self, + gas: &GasCounter, + bytes_ptr: AscPtr, + ) -> Result>, bool>>, HostExportError> { + let bytes: Vec = asc_get(self, bytes_ptr, gas)?; + let host_exports = self.as_ref().ctx.host_exports.cheap_clone(); + let ctx = &mut self.as_mut().ctx; + + let result = host_exports + .yaml_from_bytes(&bytes, gas, &mut ctx.state) + .map_err(|err| { + warn!( + &self.as_ref().ctx.logger, + "Failed to parse YAML from byte array"; + "bytes" => truncate_yaml_bytes_for_logging(&bytes), + "error" => format!("{:#}", err), + ); + + true + }); + + asc_new(self, &result, gas) + } +} + +/// For debugging, it might be useful to know exactly which bytes could not be parsed as YAML, but +/// since we can parse large YAML documents, even one bad mapping could produce terabytes of logs. +/// To avoid this, we only log the first 1024 bytes of the failed YAML source. 
+fn truncate_yaml_bytes_for_logging(bytes: &[u8]) -> String { + if bytes.len() > 1024 { + return format!("(truncated) 0x{}", hex::encode(&bytes[..1024])); + } + + format!("0x{}", hex::encode(bytes)) } diff --git a/runtime/wasm/src/module/instance.rs b/runtime/wasm/src/module/instance.rs index 55d3e8574d2..63845e81c60 100644 --- a/runtime/wasm/src/module/instance.rs +++ b/runtime/wasm/src/module/instance.rs @@ -468,6 +468,9 @@ impl WasmInstance { link!("json.toF64", json_to_f64, ptr); link!("json.toBigInt", json_to_big_int, ptr); + link!("yaml.fromBytes", yaml_from_bytes, ptr); + link!("yaml.try_fromBytes", yaml_try_from_bytes, ptr); + link!("crypto.keccak256", crypto_keccak_256, ptr); link!("bigInt.plus", big_int_plus, x_ptr, y_ptr); diff --git a/runtime/wasm/src/module/mod.rs b/runtime/wasm/src/module/mod.rs index ffe4f7aba8e..4b01b3a5fd8 100644 --- a/runtime/wasm/src/module/mod.rs +++ b/runtime/wasm/src/module/mod.rs @@ -4,6 +4,7 @@ use std::mem::MaybeUninit; use anyhow::anyhow; use anyhow::Error; use graph::blockchain::Blockchain; +use graph::data_source::subgraph; use graph::util::mem::init_slice; use semver::Version; use wasmtime::AsContext; @@ -69,6 +70,26 @@ impl ToAscPtr for offchain::TriggerData { } } +impl ToAscPtr for subgraph::TriggerData { + fn to_asc_ptr( + self, + heap: &mut H, + gas: &GasCounter, + ) -> Result, HostExportError> { + asc_new(heap, &self.entity, gas).map(|ptr| ptr.erase()) + } +} + +impl ToAscPtr for subgraph::MappingEntityTrigger { + fn to_asc_ptr( + self, + heap: &mut H, + gas: &GasCounter, + ) -> Result, HostExportError> { + asc_new(heap, &self.data.entity, gas).map(|ptr| ptr.erase()) + } +} + impl ToAscPtr for MappingTrigger where C::MappingTrigger: ToAscPtr, @@ -81,6 +102,7 @@ where match self { MappingTrigger::Onchain(trigger) => trigger.to_asc_ptr(heap, gas), MappingTrigger::Offchain(trigger) => trigger.to_asc_ptr(heap, gas), + MappingTrigger::Subgraph(trigger) => trigger.to_asc_ptr(heap, gas), } } } diff --git 
a/runtime/wasm/src/to_from/external.rs b/runtime/wasm/src/to_from/external.rs index f08eacee94f..9bbe0298abc 100644 --- a/runtime/wasm/src/to_from/external.rs +++ b/runtime/wasm/src/to_from/external.rs @@ -1,15 +1,18 @@ use ethabi; +use graph::blockchain::block_stream::{EntityOperationKind, EntitySourceOperation}; use graph::data::store::scalar::Timestamp; use graph::data::value::Word; use graph::prelude::{BigDecimal, BigInt}; use graph::runtime::gas::GasCounter; use graph::runtime::{ - asc_get, asc_new, AscIndexId, AscPtr, AscType, AscValue, HostExportError, ToAscObj, + asc_get, asc_new, AscIndexId, AscPtr, AscType, AscValue, HostExportError, IndexForAscTypeId, + ToAscObj, }; use graph::{data::store, runtime::DeterministicHostError}; use graph::{prelude::serde_json, runtime::FromAscObj}; use graph::{prelude::web3::types as web3, runtime::AscHeap}; +use graph_runtime_derive::AscType; use crate::asc_abi::class::*; @@ -463,3 +466,94 @@ where }) } } + +#[derive(Debug, Clone, Eq, PartialEq, AscType)] +pub enum AscSubgraphEntityOp { + Create, + Modify, + Delete, +} + +#[derive(AscType)] +pub struct AscEntityTrigger { + pub entity_op: AscSubgraphEntityOp, + pub entity_type: AscPtr, + pub entity: AscPtr, + pub vid: i64, +} + +impl ToAscObj for EntitySourceOperation { + fn to_asc_obj( + &self, + heap: &mut H, + gas: &GasCounter, + ) -> Result { + let entity_op = match self.entity_op { + EntityOperationKind::Create => AscSubgraphEntityOp::Create, + EntityOperationKind::Modify => AscSubgraphEntityOp::Modify, + EntityOperationKind::Delete => AscSubgraphEntityOp::Delete, + }; + + Ok(AscEntityTrigger { + entity_op, + entity_type: asc_new(heap, &self.entity_type.as_str(), gas)?, + entity: asc_new(heap, &self.entity.sorted_ref(), gas)?, + vid: self.vid, + }) + } +} + +impl AscIndexId for AscEntityTrigger { + const INDEX_ASC_TYPE_ID: IndexForAscTypeId = IndexForAscTypeId::AscEntityTrigger; +} + +impl ToAscObj> for serde_yaml::Value { + fn to_asc_obj( + &self, + heap: &mut H, + 
gas: &GasCounter, + ) -> Result, HostExportError> { + use serde_yaml::Value; + + let payload = match self { + Value::Null => EnumPayload(0), + Value::Bool(val) => EnumPayload::from(*val), + Value::Number(val) => asc_new(heap, &val.to_string(), gas)?.into(), + Value::String(val) => asc_new(heap, val, gas)?.into(), + Value::Sequence(val) => asc_new(heap, val.as_slice(), gas)?.into(), + Value::Mapping(val) => asc_new(heap, val, gas)?.into(), + Value::Tagged(val) => asc_new(heap, val.as_ref(), gas)?.into(), + }; + + Ok(AscEnum { + kind: YamlValueKind::get_kind(self), + _padding: 0, + payload, + }) + } +} + +impl ToAscObj, AscEnum>> for serde_yaml::Mapping { + fn to_asc_obj( + &self, + heap: &mut H, + gas: &GasCounter, + ) -> Result, AscEnum>, HostExportError> { + Ok(AscTypedMap { + entries: asc_new(heap, &*self.iter().collect::>(), gas)?, + }) + } +} + +impl ToAscObj for serde_yaml::value::TaggedValue { + fn to_asc_obj( + &self, + heap: &mut H, + gas: &GasCounter, + ) -> Result { + Ok(AscYamlTaggedValue { + tag: asc_new(heap, &self.tag.to_string(), gas)?, + value: asc_new(heap, &self.value, gas)?, + }) + } +} diff --git a/server/graphman/src/entities/mod.rs b/server/graphman/src/entities/mod.rs index 8f4b2d8c018..c8d3330c9f7 100644 --- a/server/graphman/src/entities/mod.rs +++ b/server/graphman/src/entities/mod.rs @@ -10,6 +10,7 @@ mod empty_response; mod execution; mod execution_id; mod subgraph_health; +mod warning_response; pub use self::block_hash::BlockHash; pub use self::block_number::BlockNumber; @@ -23,3 +24,4 @@ pub use self::empty_response::EmptyResponse; pub use self::execution::Execution; pub use self::execution_id::ExecutionId; pub use self::subgraph_health::SubgraphHealth; +pub use self::warning_response::CompletedWithWarnings; diff --git a/server/graphman/src/entities/warning_response.rs b/server/graphman/src/entities/warning_response.rs new file mode 100644 index 00000000000..0bb56aab59b --- /dev/null +++ 
b/server/graphman/src/entities/warning_response.rs @@ -0,0 +1,16 @@ +use async_graphql::SimpleObject; + +#[derive(Clone, Debug, SimpleObject)] +pub struct CompletedWithWarnings { + pub success: bool, + pub warnings: Vec, +} + +impl CompletedWithWarnings { + pub fn new(warnings: Vec) -> Self { + Self { + success: true, + warnings, + } + } +} diff --git a/server/graphman/src/resolvers/deployment_mutation.rs b/server/graphman/src/resolvers/deployment_mutation.rs index 983897391cf..8848578ce27 100644 --- a/server/graphman/src/resolvers/deployment_mutation.rs +++ b/server/graphman/src/resolvers/deployment_mutation.rs @@ -1,10 +1,15 @@ use std::sync::Arc; +use anyhow::anyhow; use async_graphql::Context; use async_graphql::Object; use async_graphql::Result; +use async_graphql::Union; +use graph::prelude::NodeId; use graph_store_postgres::graphman::GraphmanStore; +use graphman::commands::deployment::reassign::ReassignResult; +use crate::entities::CompletedWithWarnings; use crate::entities::DeploymentSelector; use crate::entities::EmptyResponse; use crate::entities::ExecutionId; @@ -12,12 +17,20 @@ use crate::resolvers::context::GraphmanContext; mod create; mod pause; +mod reassign; mod remove; mod restart; mod resume; +mod unassign; pub struct DeploymentMutation; +#[derive(Clone, Debug, Union)] +pub enum ReassignResponse { + EmptyResponse(EmptyResponse), + CompletedWithWarnings(CompletedWithWarnings), +} + /// Mutations related to one or multiple deployments. 
#[Object] impl DeploymentMutation { @@ -81,4 +94,39 @@ impl DeploymentMutation { remove::run(&ctx, &name)?; Ok(EmptyResponse::new()) } + + /// Unassign a deployment + pub async fn unassign( + &self, + ctx: &Context<'_>, + deployment: DeploymentSelector, + ) -> Result { + let ctx = GraphmanContext::new(ctx)?; + let deployment = deployment.try_into()?; + + unassign::run(&ctx, &deployment)?; + + Ok(EmptyResponse::new()) + } + + /// Assign or reassign a deployment + pub async fn reassign( + &self, + ctx: &Context<'_>, + deployment: DeploymentSelector, + node: String, + ) -> Result { + let ctx = GraphmanContext::new(ctx)?; + let deployment = deployment.try_into()?; + let node = NodeId::new(node.clone()).map_err(|()| anyhow!("illegal node id `{}`", node))?; + let reassign_result = reassign::run(&ctx, &deployment, &node)?; + match reassign_result { + ReassignResult::CompletedWithWarnings(warnings) => Ok( + ReassignResponse::CompletedWithWarnings(CompletedWithWarnings::new(warnings)), + ), + ReassignResult::EmptyResponse => { + Ok(ReassignResponse::EmptyResponse(EmptyResponse::new())) + } + } + } } diff --git a/server/graphman/src/resolvers/deployment_mutation/pause.rs b/server/graphman/src/resolvers/deployment_mutation/pause.rs index 8ba1f73446b..c16c505c178 100644 --- a/server/graphman/src/resolvers/deployment_mutation/pause.rs +++ b/server/graphman/src/resolvers/deployment_mutation/pause.rs @@ -1,18 +1,29 @@ use async_graphql::Result; -use graphman::commands::deployment::pause::load_active_deployment; -use graphman::commands::deployment::pause::pause_active_deployment; +use graphman::commands::deployment::pause::{ + load_active_deployment, pause_active_deployment, PauseDeploymentError, +}; use graphman::deployment::DeploymentSelector; use crate::resolvers::context::GraphmanContext; pub fn run(ctx: &GraphmanContext, deployment: &DeploymentSelector) -> Result<()> { - let active_deployment = load_active_deployment(ctx.primary_pool.clone(), deployment)?; + let 
active_deployment = load_active_deployment(ctx.primary_pool.clone(), deployment); - pause_active_deployment( - ctx.primary_pool.clone(), - ctx.notification_sender.clone(), - active_deployment, - )?; + match active_deployment { + Ok(active_deployment) => { + pause_active_deployment( + ctx.primary_pool.clone(), + ctx.notification_sender.clone(), + active_deployment, + )?; + } + Err(PauseDeploymentError::AlreadyPaused(_)) => { + return Ok(()); + } + Err(PauseDeploymentError::Common(e)) => { + return Err(e.into()); + } + } Ok(()) } diff --git a/server/graphman/src/resolvers/deployment_mutation/reassign.rs b/server/graphman/src/resolvers/deployment_mutation/reassign.rs new file mode 100644 index 00000000000..3887d67032a --- /dev/null +++ b/server/graphman/src/resolvers/deployment_mutation/reassign.rs @@ -0,0 +1,24 @@ +use anyhow::Ok; +use async_graphql::Result; +use graph::prelude::NodeId; +use graphman::commands::deployment::reassign::load_deployment; +use graphman::commands::deployment::reassign::reassign_deployment; +use graphman::commands::deployment::reassign::ReassignResult; +use graphman::deployment::DeploymentSelector; + +use crate::resolvers::context::GraphmanContext; + +pub fn run( + ctx: &GraphmanContext, + deployment: &DeploymentSelector, + node: &NodeId, +) -> Result { + let deployment = load_deployment(ctx.primary_pool.clone(), deployment)?; + let reassign_result = reassign_deployment( + ctx.primary_pool.clone(), + ctx.notification_sender.clone(), + &deployment, + &node, + )?; + Ok(reassign_result) +} diff --git a/server/graphman/src/resolvers/deployment_mutation/unassign.rs b/server/graphman/src/resolvers/deployment_mutation/unassign.rs new file mode 100644 index 00000000000..4af620e8568 --- /dev/null +++ b/server/graphman/src/resolvers/deployment_mutation/unassign.rs @@ -0,0 +1,17 @@ +use async_graphql::Result; +use graphman::commands::deployment::unassign::load_assigned_deployment; +use graphman::commands::deployment::unassign::unassign_deployment; +use 
graphman::deployment::DeploymentSelector; + +use crate::resolvers::context::GraphmanContext; + +pub fn run(ctx: &GraphmanContext, deployment: &DeploymentSelector) -> Result<()> { + let deployment = load_assigned_deployment(ctx.primary_pool.clone(), deployment)?; + unassign_deployment( + ctx.primary_pool.clone(), + ctx.notification_sender.clone(), + deployment, + )?; + + Ok(()) +} diff --git a/server/graphman/tests/deployment_mutation.rs b/server/graphman/tests/deployment_mutation.rs index 927cf5bc87a..88f4a9a5180 100644 --- a/server/graphman/tests/deployment_mutation.rs +++ b/server/graphman/tests/deployment_mutation.rs @@ -2,10 +2,12 @@ pub mod util; use std::time::Duration; +use graph::components::store::SubgraphStore; use graph::prelude::DeploymentHash; use serde::Deserialize; use serde_json::json; use test_store::create_test_subgraph; +use test_store::SUBGRAPH_STORE; use tokio::time::sleep; use self::util::client::send_graphql_request; @@ -390,3 +392,205 @@ fn graphql_cannot_remove_subgraph_with_invalid_name() { assert_ne!(resp, success_resp); }); } + +#[test] +fn graphql_can_unassign_deployments() { + run_test(|| async { + let deployment_hash = DeploymentHash::new("subgraph_1").unwrap(); + create_test_subgraph(&deployment_hash, TEST_SUBGRAPH_SCHEMA).await; + + let unassign_req = send_graphql_request( + json!({ + "query": r#"mutation { + deployment { + unassign(deployment: { hash: "subgraph_1" }){ + success + } + } + }"# + }), + VALID_TOKEN, + ) + .await; + + let expected_resp = json!({ + "data": { + "deployment": { + "unassign": { + "success": true, + } + } + } + }); + + let subgraph_node_id = send_graphql_request( + json!({ + "query": r#"{ + deployment { + info(deployment: { hash: "subgraph_1" }) { + nodeId + } + } + }"# + }), + VALID_TOKEN, + ) + .await; + + let is_node_null = subgraph_node_id["data"]["deployment"]["info"][0]["nodeId"].is_null(); + + assert_eq!(unassign_req, expected_resp); + assert_eq!(is_node_null, true); + }); +} + +#[test] +fn 
graphql_cannot_unassign_deployments_twice() { + run_test(|| async { + let deployment_hash = DeploymentHash::new("subgraph_1").unwrap(); + create_test_subgraph(&deployment_hash, TEST_SUBGRAPH_SCHEMA).await; + + send_graphql_request( + json!({ + "query": r#"mutation { + deployment { + unassign(deployment: { hash: "subgraph_1" }){ + success + } + } + }"# + }), + VALID_TOKEN, + ) + .await; + + let unassign_again = send_graphql_request( + json!({ + "query": r#"mutation { + deployment { + unassign(deployment: { hash: "subgraph_1" }){ + success + } + } + }"# + }), + VALID_TOKEN, + ) + .await; + + let expected_resp = json!({ + "data": { + "deployment": { + "unassign": { + "success": true, + } + } + } + }); + + assert_ne!(unassign_again, expected_resp); + }); +} + +#[test] +fn graphql_can_reassign_deployment() { + run_test(|| async { + let deployment_hash = DeploymentHash::new("subgraph_1").unwrap(); + create_test_subgraph(&deployment_hash, TEST_SUBGRAPH_SCHEMA).await; + + let deployment_hash = DeploymentHash::new("subgraph_2").unwrap(); + let locator = create_test_subgraph(&deployment_hash, TEST_SUBGRAPH_SCHEMA).await; + + send_graphql_request( + json!({ + "query": r#"mutation { + deployment { + unassign(deployment: { hash: "subgraph_1" }){ + success + } + } + }"# + }), + VALID_TOKEN, + ) + .await; + + let node = SUBGRAPH_STORE.assigned_node(&locator).unwrap().unwrap(); + + let reassign = send_graphql_request( + json!({ + "query": r#"mutation ReassignDeployment($node: String!) { + deployment { + reassign(deployment: { hash: "subgraph_1" }, node: $node) { + ... on EmptyResponse { + success + } + ... 
on CompletedWithWarnings { + success + warnings + } + } + } + }"#, + "variables": { + "node": node.to_string(), + } + }), + VALID_TOKEN, + ) + .await; + + let expected_resp = json!({ + "data": { + "deployment": { + "reassign": { + "success": true, + } + } + } + }); + + assert_eq!(reassign, expected_resp); + }); +} + +#[test] +fn graphql_warns_reassign_on_wrong_node_id() { + run_test(|| async { + let deployment_hash = DeploymentHash::new("subgraph_1").unwrap(); + create_test_subgraph(&deployment_hash, TEST_SUBGRAPH_SCHEMA).await; + + let reassign = send_graphql_request( + json!({ + "query": r#"mutation { + deployment { + reassign(deployment: { hash: "subgraph_1" }, node: "invalid_node") { + ... on EmptyResponse { + success + } + ... on CompletedWithWarnings { + success + warnings + } + } + } + }"# + }), + VALID_TOKEN, + ) + .await; + + let expected_resp = json!({ + "data": { + "deployment": { + "reassign": { + "success": true, + "warnings": ["This is the only deployment assigned to 'invalid_node'. Please make sure that the node ID is spelled correctly."], + } + } + } + }); + + assert_eq!(reassign, expected_resp); + }); +} diff --git a/store/postgres/src/block_range.rs b/store/postgres/src/block_range.rs index f05c4e73869..7dbcaa29c00 100644 --- a/store/postgres/src/block_range.rs +++ b/store/postgres/src/block_range.rs @@ -50,7 +50,7 @@ lazy_static! { /// The range of blocks for which an entity is valid. We need this struct /// to bind ranges into Diesel queries. 
-#[derive(Clone, Debug)] +#[derive(Clone, Debug, Copy)] pub struct BlockRange(Bound, Bound); pub(crate) fn first_block_in_range( @@ -132,6 +132,87 @@ impl<'a> QueryFragment for BlockRangeUpperBoundClause<'a> { } } +#[derive(Debug, Clone, Copy)] +pub enum BoundSide { + Lower, + Upper, +} + +/// Helper for generating SQL fragments for selecting entities in a specific block range +#[derive(Debug, Clone, Copy)] +pub enum EntityBlockRange { + Mutable((BlockRange, BoundSide)), + Immutable(BlockRange), +} + +impl EntityBlockRange { + pub fn new( + immutable: bool, + block_range: std::ops::Range, + bound_side: BoundSide, + ) -> Self { + let start: Bound = Bound::Included(block_range.start); + let end: Bound = Bound::Excluded(block_range.end); + let block_range: BlockRange = BlockRange(start, end); + if immutable { + Self::Immutable(block_range) + } else { + Self::Mutable((block_range, bound_side)) + } + } + + /// Outputs SQL that matches only rows whose entities would trigger a change + /// event (Create, Modify, Delete) in a given interval of blocks. Otherwise said + /// a block_range border is contained in an interval of blocks. 
For instance + /// one of the following: + /// lower(block_range) >= $1 and lower(block_range) <= $2 + /// upper(block_range) >= $1 and upper(block_range) <= $2 + /// block$ >= $1 and block$ <= $2 + pub fn contains<'b>(&'b self, out: &mut AstPass<'_, 'b, Pg>) -> QueryResult<()> { + out.unsafe_to_cache_prepared(); + let block_range = match self { + EntityBlockRange::Mutable((br, _)) => br, + EntityBlockRange::Immutable(br) => br, + }; + let BlockRange(start, finish) = block_range; + + self.compare_column(out); + out.push_sql(">= "); + match start { + Bound::Included(block) => out.push_bind_param::(block)?, + Bound::Excluded(block) => { + out.push_bind_param::(block)?; + out.push_sql("+1"); + } + Bound::Unbounded => unimplemented!(), + }; + out.push_sql(" and"); + self.compare_column(out); + out.push_sql("<= "); + match finish { + Bound::Included(block) => { + out.push_bind_param::(block)?; + out.push_sql("+1"); + } + Bound::Excluded(block) => out.push_bind_param::(block)?, + Bound::Unbounded => unimplemented!(), + }; + Ok(()) + } + + pub fn compare_column(&self, out: &mut AstPass) { + match self { + EntityBlockRange::Mutable((_, BoundSide::Lower)) => { + out.push_sql(" lower(block_range) ") + } + EntityBlockRange::Mutable((_, BoundSide::Upper)) => { + out.push_sql(" upper(block_range) ") + } + EntityBlockRange::Immutable(_) => out.push_sql(" block$ "), + } + } +} + /// Helper for generating various SQL fragments for handling the block range /// of entity versions #[allow(unused)] diff --git a/store/postgres/src/block_store.rs b/store/postgres/src/block_store.rs index efaca838d59..9af40b8d2a0 100644 --- a/store/postgres/src/block_store.rs +++ b/store/postgres/src/block_store.rs @@ -33,6 +33,10 @@ use self::primary::Chain; #[cfg(debug_assertions)] pub const FAKE_NETWORK_SHARED: &str = "fake_network_shared"; +// Highest version of the database that the executable supports. +// To be incremented on each breaking change to the database. 
+const SUPPORTED_DB_VERSION: i64 = 3; + /// The status of a chain: whether we can only read from the chain, or /// whether it is ok to ingest from it, too #[derive(Copy, Clone)] @@ -531,6 +535,18 @@ impl BlockStore { .set(dbv::version.eq(3)) .execute(&mut conn)?; }; + if version < SUPPORTED_DB_VERSION { + // Bump it to make sure that all executables are working with the same DB format + diesel::update(dbv::table) + .set(dbv::version.eq(SUPPORTED_DB_VERSION)) + .execute(&mut conn)?; + }; + if version > SUPPORTED_DB_VERSION { + panic!( + "The executable is too old and doesn't support the database version: {}", + version + ) + } Ok(()) } diff --git a/store/postgres/src/catalog.rs b/store/postgres/src/catalog.rs index dc73ec6f7f5..8e988e31522 100644 --- a/store/postgres/src/catalog.rs +++ b/store/postgres/src/catalog.rs @@ -912,3 +912,31 @@ fn has_minmax_multi_ops(conn: &mut PgConnection) -> Result { Ok(sql_query(QUERY).get_result::(conn)?.has_ops) } + +pub(crate) fn histogram_bounds( + conn: &mut PgConnection, + namespace: &Namespace, + table: &SqlName, + column: &str, +) -> Result, StoreError> { + const QUERY: &str = "select histogram_bounds::text::int8[] bounds \ + from pg_stats \ + where schemaname = $1 \ + and tablename = $2 \ + and attname = $3"; + + #[derive(Queryable, QueryableByName)] + struct Bounds { + #[diesel(sql_type = Array)] + bounds: Vec, + } + + sql_query(QUERY) + .bind::(namespace.as_str()) + .bind::(table.as_str()) + .bind::(column) + .get_result::(conn) + .optional() + .map(|bounds| bounds.map(|b| b.bounds).unwrap_or_default()) + .map_err(StoreError::from) +} diff --git a/store/postgres/src/chain_store.rs b/store/postgres/src/chain_store.rs index b399b15b788..097aa799eff 100644 --- a/store/postgres/src/chain_store.rs +++ b/store/postgres/src/chain_store.rs @@ -1,3 +1,4 @@ +use anyhow::anyhow; use diesel::pg::PgConnection; use diesel::prelude::*; use diesel::r2d2::{ConnectionManager, PooledConnection}; @@ -13,6 +14,7 @@ use graph::slog::Logger; use 
graph::stable_hash::crypto_stable_hash; use graph::util::herd_cache::HerdCache; +use std::collections::BTreeMap; use std::{ collections::HashMap, convert::{TryFrom, TryInto}, @@ -20,9 +22,9 @@ use std::{ sync::Arc, }; -use graph::blockchain::{Block, BlockHash, ChainIdentifier}; +use graph::blockchain::{Block, BlockHash, ChainIdentifier, ExtendedBlockPtr}; use graph::cheap_clone::CheapClone; -use graph::prelude::web3::types::H256; +use graph::prelude::web3::types::{H256, U256}; use graph::prelude::{ async_trait, serde_json as json, transaction_receipt::LightTransactionReceipt, BlockNumber, BlockPtr, CachedEthereumCall, CancelableError, ChainStore as ChainStoreTrait, Error, @@ -52,6 +54,14 @@ impl JsonBlock { data, } } + + fn timestamp(&self) -> Option { + self.data + .as_ref() + .and_then(|data| data.get("timestamp")) + .and_then(|ts| ts.as_str()) + .and_then(|ts| U256::from_dec_str(ts).ok()) + } } /// Tables in the 'public' database schema that store chain-specific data @@ -579,6 +589,50 @@ mod data { Ok(()) } + pub(super) fn block_ptrs_by_numbers( + &self, + conn: &mut PgConnection, + chain: &str, + numbers: &[BlockNumber], + ) -> Result, StoreError> { + let x = match self { + Storage::Shared => { + use public::ethereum_blocks as b; + + b::table + .select(( + b::hash, + b::number, + b::parent_hash, + sql::("coalesce(data -> 'block', data)"), + )) + .filter(b::network_name.eq(chain)) + .filter(b::number.eq_any(Vec::from_iter(numbers.iter().map(|&n| n as i64)))) + .load::<(BlockHash, i64, BlockHash, json::Value)>(conn) + } + Storage::Private(Schema { blocks, .. 
}) => blocks + .table() + .select(( + blocks.hash(), + blocks.number(), + blocks.parent_hash(), + sql::("coalesce(data -> 'block', data)"), + )) + .filter( + blocks + .number() + .eq_any(Vec::from_iter(numbers.iter().map(|&n| n as i64))), + ) + .load::<(BlockHash, i64, BlockHash, json::Value)>(conn), + }?; + + Ok(x.into_iter() + .map(|(hash, nr, parent, data)| { + JsonBlock::new(BlockPtr::new(hash, nr as i32), parent, Some(data)) + }) + .collect()) + } + pub(super) fn blocks( &self, conn: &mut PgConnection, @@ -1651,7 +1705,10 @@ impl ChainStoreMetrics { } #[derive(Clone, CheapClone)] -struct BlocksLookupResult(Arc, StoreError>>); +enum BlocksLookupResult { + ByHash(Arc, StoreError>>), + ByNumber(Arc>, StoreError>>), +} pub struct ChainStore { logger: Logger, @@ -1870,8 +1927,52 @@ impl ChainStore { .await?; Ok(values) } + + async fn blocks_from_store_by_numbers( + self: &Arc, + numbers: Vec, + ) -> Result>, StoreError> { + let store = self.cheap_clone(); + let pool = self.pool.clone(); + + let values = pool + .with_conn(move |conn, _| { + store + .storage + .block_ptrs_by_numbers(conn, &store.chain, &numbers) + .map_err(CancelableError::from) + }) + .await?; + + let mut block_map = BTreeMap::new(); + + for block in values { + let block_number = block.ptr.block_number(); + block_map + .entry(block_number) + .or_insert_with(Vec::new) + .push(block); + } + + Ok(block_map) + } } +fn json_block_to_block_ptr_ext(json_block: &JsonBlock) -> Result { + let hash = json_block.ptr.hash.clone(); + let number = json_block.ptr.number; + let parent_hash = json_block.parent_hash.clone(); + + let timestamp = json_block + .timestamp() + .ok_or_else(|| anyhow!("Timestamp is missing"))?; + + let ptr = + ExtendedBlockPtr::try_from((hash.as_h256(), number, parent_hash.as_h256(), timestamp)) + .map_err(|e| anyhow!("Failed to convert to ExtendedBlockPtr: {}", e))?; + + Ok(ptr) +} #[async_trait] impl ChainStoreTrait for ChainStore { fn genesis_block_ptr(&self) -> Result { @@ -2065,6 
+2166,85 @@ impl ChainStoreTrait for ChainStore { Ok(()) } + async fn block_ptrs_by_numbers( + self: Arc, + numbers: Vec, + ) -> Result>, Error> { + let result = if ENV_VARS.store.disable_block_cache_for_lookup { + let values = self.blocks_from_store_by_numbers(numbers).await?; + + values + } else { + let cached = self.recent_blocks_cache.get_block_ptrs_by_numbers(&numbers); + + let stored = if cached.len() < numbers.len() { + let missing_numbers = numbers + .iter() + .filter(|num| !cached.iter().any(|(ptr, _)| ptr.block_number() == **num)) + .cloned() + .collect::>(); + + let hash = crypto_stable_hash(&missing_numbers); + let this = self.clone(); + let lookup_fut = async move { + let res = this.blocks_from_store_by_numbers(missing_numbers).await; + BlocksLookupResult::ByNumber(Arc::new(res)) + }; + let lookup_herd = self.lookup_herd.cheap_clone(); + let logger = self.logger.cheap_clone(); + let res = match lookup_herd.cached_query(hash, lookup_fut, &logger).await { + (BlocksLookupResult::ByNumber(res), _) => res, + _ => unreachable!(), + }; + let res = Arc::try_unwrap(res).unwrap_or_else(|arc| (*arc).clone()); + + match res { + Ok(blocks) => { + for (_, blocks_for_num) in &blocks { + if blocks.len() == 1 { + self.recent_blocks_cache + .insert_block(blocks_for_num[0].clone()); + } + } + blocks + } + Err(e) => { + return Err(e.into()); + } + } + } else { + BTreeMap::new() + }; + + let cached_map = cached + .into_iter() + .map(|(ptr, data)| (ptr.block_number(), vec![data])) + .collect::>(); + + let mut result = cached_map; + for (num, blocks) in stored { + if !result.contains_key(&num) { + result.insert(num, blocks); + } + } + + result + }; + + let ptrs = result + .into_iter() + .map(|(num, blocks)| { + let ptrs = blocks + .into_iter() + .filter_map(|block| json_block_to_block_ptr_ext(&block).ok()) + .collect(); + (num, ptrs) + }) + .collect(); + + Ok(ptrs) + } + async fn blocks(self: Arc, hashes: Vec) -> Result, Error> { if 
ENV_VARS.store.disable_block_cache_for_lookup { let values = self @@ -2094,12 +2274,22 @@ impl ChainStoreTrait for ChainStore { let this = self.clone(); let lookup_fut = async move { let res = this.blocks_from_store(hashes).await; - BlocksLookupResult(Arc::new(res)) + BlocksLookupResult::ByHash(Arc::new(res)) }; let lookup_herd = self.lookup_herd.cheap_clone(); let logger = self.logger.cheap_clone(); - let (BlocksLookupResult(res), _) = - lookup_herd.cached_query(hash, lookup_fut, &logger).await; + // This match can only return ByHash because lookup_fut explicitly constructs + // BlocksLookupResult::ByHash. The cache preserves the exact future result, + // so ByNumber variant is structurally impossible here. + let res = match lookup_herd.cached_query(hash, lookup_fut, &logger).await { + (BlocksLookupResult::ByHash(res), _) => res, + (BlocksLookupResult::ByNumber(_), _) => { + Arc::new(Err(StoreError::Unknown(anyhow::anyhow!( + "Unexpected BlocksLookupResult::ByNumber returned from cached block lookup by hash" + )))) + } + }; + // Try to avoid cloning a non-concurrent lookup; it's not // entirely clear whether that will actually avoid a clone // since it depends on a lot of the details of how the @@ -2361,6 +2551,10 @@ mod recent_blocks_cache { .and_then(|block| block.data.as_ref().map(|data| (&block.ptr, data))) } + fn get_block_by_number(&self, number: BlockNumber) -> Option<&JsonBlock> { + self.blocks.get(&number) + } + fn get_ancestor( &self, child_ptr: &BlockPtr, @@ -2483,6 +2677,28 @@ mod recent_blocks_cache { blocks } + pub fn get_block_ptrs_by_numbers( + &self, + numbers: &[BlockNumber], + ) -> Vec<(BlockPtr, JsonBlock)> { + let inner = self.inner.read(); + let mut blocks: Vec<(BlockPtr, JsonBlock)> = Vec::new(); + + for &number in numbers { + if let Some(block) = inner.get_block_by_number(number) { + blocks.push((block.ptr.clone(), block.clone())); + } + } + + inner.metrics.record_hit_and_miss( + &inner.network, + blocks.len(), + numbers.len() - 
blocks.len(), + ); + + blocks + } + /// Tentatively caches the `ancestor` of a [`BlockPtr`] (`child`), together with /// its associated `data`. Note that for this to work, `child` must be /// in the cache already. The first block in the cache should be diff --git a/store/postgres/src/copy.rs b/store/postgres/src/copy.rs index c526a93c7b8..9b64390581b 100644 --- a/store/postgres/src/copy.rs +++ b/store/postgres/src/copy.rs @@ -19,21 +19,15 @@ use std::{ }; use diesel::{ - deserialize::FromSql, dsl::sql, insert_into, - pg::Pg, r2d2::{ConnectionManager, PooledConnection}, - select, - serialize::{Output, ToSql}, - sql_query, - sql_types::{BigInt, Integer}, - update, Connection as _, ExpressionMethods, OptionalExtension, PgConnection, QueryDsl, - RunQueryDsl, + select, sql_query, update, Connection as _, ExpressionMethods, OptionalExtension, PgConnection, + QueryDsl, RunQueryDsl, }; use graph::{ constraint_violation, - prelude::{info, o, warn, BlockNumber, BlockPtr, Logger, StoreError, ENV_VARS}, + prelude::{info, o, warn, BlockNumber, BlockPtr, Logger, StoreError}, schema::EntityType, }; use itertools::Itertools; @@ -43,17 +37,11 @@ use crate::{ dynds::DataSourcesTable, primary::{DeploymentId, Site}, relational::index::IndexList, + vid_batcher::{VidBatcher, VidRange}, }; use crate::{connection_pool::ConnectionPool, relational::Layout}; use crate::{relational::Table, relational_queries as rq}; -/// The initial batch size for tables that do not have an array column -const INITIAL_BATCH_SIZE: i64 = 10_000; -/// The initial batch size for tables that do have an array column; those -/// arrays can be large and large arrays will slow down copying a lot. 
We -/// therefore tread lightly in that case -const INITIAL_BATCH_SIZE_LIST: i64 = 100; - const LOG_INTERVAL: Duration = Duration::from_secs(3 * 60); /// If replicas are lagging by more than this, the copying code will pause @@ -212,6 +200,7 @@ impl CopyState { TableState::init( conn, dst.site.clone(), + &src, src_table.clone(), dst_table.clone(), &target_block, @@ -227,9 +216,9 @@ impl CopyState { ( cts::entity_type.eq(table.batch.dst.object.as_str()), cts::dst.eq(dst.site.id), - cts::next_vid.eq(table.batch.next_vid), - cts::target_vid.eq(table.batch.target_vid), - cts::batch_size.eq(table.batch.batch_size.size), + cts::next_vid.eq(table.batch.next_vid()), + cts::target_vid.eq(table.batch.target_vid()), + cts::batch_size.eq(table.batch.batch_size()), ) }) .collect::>(); @@ -299,51 +288,6 @@ pub(crate) fn source( .map_err(StoreError::from) } -/// Track the desired size of a batch in such a way that doing the next -/// batch gets close to TARGET_DURATION for the time it takes to copy one -/// batch, but don't step up the size by more than 2x at once -#[derive(Debug, Queryable)] -pub(crate) struct AdaptiveBatchSize { - pub size: i64, -} - -impl AdaptiveBatchSize { - pub fn new(table: &Table) -> Self { - let size = if table.columns.iter().any(|col| col.is_list()) { - INITIAL_BATCH_SIZE_LIST - } else { - INITIAL_BATCH_SIZE - }; - - Self { size } - } - - // adjust batch size by trying to extrapolate in such a way that we - // get close to TARGET_DURATION for the time it takes to copy one - // batch, but don't step up batch_size by more than 2x at once - pub fn adapt(&mut self, duration: Duration) { - // Avoid division by zero - let duration = duration.as_millis().max(1); - let new_batch_size = self.size as f64 - * ENV_VARS.store.batch_target_duration.as_millis() as f64 - / duration as f64; - self.size = (2 * self.size).min(new_batch_size.round() as i64); - } -} - -impl ToSql for AdaptiveBatchSize { - fn to_sql<'b>(&'b self, out: &mut Output<'b, '_, Pg>) -> 
diesel::serialize::Result { - >::to_sql(&self.size, out) - } -} - -impl FromSql for AdaptiveBatchSize { - fn from_sql(bytes: diesel::pg::PgValue) -> diesel::deserialize::Result { - let size = >::from_sql(bytes)?; - Ok(AdaptiveBatchSize { size }) - } -} - /// A helper to copy entities from one table to another in batches that are /// small enough to not interfere with the rest of the operations happening /// in the database. The `src` and `dst` table must have the same structure @@ -353,49 +297,42 @@ impl FromSql for AdaptiveBatchSize { pub(crate) struct BatchCopy { src: Arc, dst: Arc
, - /// The `vid` of the next entity version that we will copy - next_vid: i64, - /// The last `vid` that should be copied - target_vid: i64, - batch_size: AdaptiveBatchSize, + batcher: VidBatcher, } impl BatchCopy { - pub fn new(src: Arc
, dst: Arc
, first_vid: i64, last_vid: i64) -> Self { - let batch_size = AdaptiveBatchSize::new(&dst); - - Self { - src, - dst, - next_vid: first_vid, - target_vid: last_vid, - batch_size, - } + pub fn new(batcher: VidBatcher, src: Arc
, dst: Arc
) -> Self { + Self { src, dst, batcher } } /// Copy one batch of entities and update internal state so that the /// next call to `run` will copy the next batch pub fn run(&mut self, conn: &mut PgConnection) -> Result { - let start = Instant::now(); - - // Copy all versions with next_vid <= vid <= next_vid + batch_size - 1, - // but do not go over target_vid - let last_vid = (self.next_vid + self.batch_size.size - 1).min(self.target_vid); - rq::CopyEntityBatchQuery::new(self.dst.as_ref(), &self.src, self.next_vid, last_vid)? - .execute(conn)?; + let (duration, _) = self.batcher.step(|start, end| { + rq::CopyEntityBatchQuery::new(self.dst.as_ref(), &self.src, start, end)? + .execute(conn)?; + Ok(()) + })?; - let duration = start.elapsed(); + Ok(duration) + } - // remember how far we got - self.next_vid = last_vid + 1; + pub fn finished(&self) -> bool { + self.batcher.finished() + } - self.batch_size.adapt(duration); + /// The first `vid` that has not been copied yet + pub fn next_vid(&self) -> i64 { + self.batcher.next_vid() + } - Ok(duration) + /// The last `vid` that should be copied + pub fn target_vid(&self) -> i64 { + self.batcher.target_vid() } - pub fn finished(&self) -> bool { - self.next_vid > self.target_vid + pub fn batch_size(&self) -> i64 { + self.batcher.batch_size() as i64 } } @@ -409,34 +346,15 @@ impl TableState { fn init( conn: &mut PgConnection, dst_site: Arc, + src_layout: &Layout, src: Arc
, dst: Arc
, target_block: &BlockPtr, ) -> Result { - #[derive(QueryableByName)] - struct MaxVid { - #[diesel(sql_type = BigInt)] - max_vid: i64, - } - - let max_block_clause = if src.immutable { - "block$ <= $1" - } else { - "lower(block_range) <= $1" - }; - let target_vid = sql_query(format!( - "select coalesce(max(vid), -1) as max_vid from {} where {}", - src.qualified_name.as_str(), - max_block_clause - )) - .bind::(&target_block.number) - .load::(conn)? - .first() - .map(|v| v.max_vid) - .unwrap_or(-1); - + let vid_range = VidRange::for_copy(conn, &src, target_block)?; + let batcher = VidBatcher::load(conn, &src_layout.site.namespace, src.as_ref(), vid_range)?; Ok(Self { - batch: BatchCopy::new(src, dst, 0, target_vid), + batch: BatchCopy::new(batcher, src, dst), dst_site, duration_ms: 0, }) @@ -502,10 +420,14 @@ impl TableState { ); match (src, dst) { (Ok(src), Ok(dst)) => { - let mut batch = BatchCopy::new(src, dst, current_vid, target_vid); - let batch_size = AdaptiveBatchSize { size }; - - batch.batch_size = batch_size; + let batcher = VidBatcher::load( + conn, + &src_layout.site.namespace, + &src, + VidRange::new(current_vid, target_vid), + )? + .with_batch_size(size as usize); + let batch = BatchCopy::new(batcher, src, dst); Ok(TableState { batch, @@ -525,7 +447,6 @@ impl TableState { &mut self, conn: &mut PgConnection, elapsed: Duration, - first_batch: bool, ) -> Result<(), StoreError> { use copy_table_state as cts; @@ -533,20 +454,20 @@ impl TableState { // 300B years self.duration_ms += i64::try_from(elapsed.as_millis()).unwrap_or(0); - if first_batch { - // Reset started_at so that finished_at - started_at is an - // accurate indication of how long we worked on a table. 
- update( - cts::table - .filter(cts::dst.eq(self.dst_site.id)) - .filter(cts::entity_type.eq(self.batch.dst.object.as_str())), - ) - .set(cts::started_at.eq(sql("now()"))) - .execute(conn)?; - } + // Reset started_at so that finished_at - started_at is an accurate + // indication of how long we worked on a table if we haven't worked + // on the table yet. + update( + cts::table + .filter(cts::dst.eq(self.dst_site.id)) + .filter(cts::entity_type.eq(self.batch.dst.object.as_str())) + .filter(cts::duration_ms.eq(0)), + ) + .set(cts::started_at.eq(sql("now()"))) + .execute(conn)?; let values = ( - cts::next_vid.eq(self.batch.next_vid), - cts::batch_size.eq(self.batch.batch_size.size), + cts::next_vid.eq(self.batch.next_vid()), + cts::batch_size.eq(self.batch.batch_size()), cts::duration_ms.eq(self.duration_ms), ); update( @@ -591,11 +512,9 @@ impl TableState { } fn copy_batch(&mut self, conn: &mut PgConnection) -> Result { - let first_batch = self.batch.next_vid == 0; - let duration = self.batch.run(conn)?; - self.record_progress(conn, duration, first_batch)?; + self.record_progress(conn, duration)?; if self.finished() { self.record_finished(conn)?; @@ -620,12 +539,12 @@ impl<'a> CopyProgress<'a> { let target_vid: i64 = state .tables .iter() - .map(|table| table.batch.target_vid) + .map(|table| table.batch.target_vid()) .sum(); let current_vid = state .tables .iter() - .map(|table| table.batch.next_vid.min(table.batch.target_vid)) + .map(|table| table.batch.next_vid()) .sum(); Self { logger, @@ -663,18 +582,18 @@ impl<'a> CopyProgress<'a> { info!( self.logger, "Copied {:.2}% of `{}` entities ({}/{} entity versions), {:.2}% of overall data", - Self::progress_pct(batch.next_vid, batch.target_vid), + Self::progress_pct(batch.next_vid(), batch.target_vid()), batch.dst.object, - batch.next_vid, - batch.target_vid, - Self::progress_pct(self.current_vid + batch.next_vid, self.target_vid) + batch.next_vid(), + batch.target_vid(), + Self::progress_pct(self.current_vid + 
batch.next_vid(), self.target_vid) ); self.last_log = Instant::now(); } } fn table_finished(&mut self, batch: &BatchCopy) { - self.current_vid += batch.next_vid; + self.current_vid += batch.next_vid(); } fn finished(&self) { diff --git a/store/postgres/src/deployment.rs b/store/postgres/src/deployment.rs index efe05a666b9..836048912b1 100644 --- a/store/postgres/src/deployment.rs +++ b/store/postgres/src/deployment.rs @@ -13,6 +13,7 @@ use diesel::{ sql_query, sql_types::{Nullable, Text}, }; +use graph::semver::Version; use graph::{ blockchain::block_stream::FirehoseCursor, data::subgraph::schema::SubgraphError, @@ -305,11 +306,13 @@ pub fn debug_fork( pub fn schema(conn: &mut PgConnection, site: &Site) -> Result<(InputSchema, bool), StoreError> { use subgraph_manifest as sm; - let (s, use_bytea_prefix) = sm::table - .select((sm::schema, sm::use_bytea_prefix)) + let (s, spec_ver, use_bytea_prefix) = sm::table + .select((sm::schema, sm::spec_version, sm::use_bytea_prefix)) .filter(sm::id.eq(site.id)) - .first::<(String, bool)>(conn)?; - InputSchema::parse_latest(s.as_str(), site.deployment.clone()) + .first::<(String, String, bool)>(conn)?; + let spec_version = + Version::parse(spec_ver.as_str()).map_err(|err| StoreError::Unknown(err.into()))?; + InputSchema::parse(&spec_version, s.as_str(), site.deployment.clone()) .map_err(StoreError::Unknown) .map(|schema| (schema, use_bytea_prefix)) } diff --git a/store/postgres/src/deployment_store.rs b/store/postgres/src/deployment_store.rs index f5b2825f63f..99e4409d0ab 100644 --- a/store/postgres/src/deployment_store.rs +++ b/store/postgres/src/deployment_store.rs @@ -4,7 +4,7 @@ use diesel::pg::PgConnection; use diesel::r2d2::{ConnectionManager, PooledConnection}; use diesel::{prelude::*, sql_query}; use graph::anyhow::Context; -use graph::blockchain::block_stream::FirehoseCursor; +use graph::blockchain::block_stream::{EntitySourceOperation, FirehoseCursor}; use graph::blockchain::BlockTime; use 
graph::components::store::write::RowGroup; use graph::components::store::{ @@ -29,8 +29,8 @@ use lru_time_cache::LruCache; use rand::{seq::SliceRandom, thread_rng}; use std::collections::{BTreeMap, HashMap}; use std::convert::Into; -use std::ops::Deref; use std::ops::{Bound, DerefMut}; +use std::ops::{Deref, Range}; use std::str::FromStr; use std::sync::{atomic::AtomicUsize, Arc, Mutex}; use std::time::{Duration, Instant}; @@ -1063,6 +1063,18 @@ impl DeploymentStore { layout.find_many(&mut conn, ids_for_type, block) } + pub(crate) fn get_range( + &self, + site: Arc, + entity_types: Vec, + causality_region: CausalityRegion, + block_range: Range, + ) -> Result>, StoreError> { + let mut conn = self.get_conn()?; + let layout = self.layout(&mut conn, site)?; + layout.find_range(&mut conn, entity_types, causality_region, block_range) + } + pub(crate) fn get_derived( &self, site: Arc, diff --git a/store/postgres/src/lib.rs b/store/postgres/src/lib.rs index 409ce182d77..759e8601313 100644 --- a/store/postgres/src/lib.rs +++ b/store/postgres/src/lib.rs @@ -36,6 +36,7 @@ mod store; mod store_events; mod subgraph_store; pub mod transaction_receipt; +mod vid_batcher; mod writable; pub mod graphman; diff --git a/store/postgres/src/primary.rs b/store/postgres/src/primary.rs index b13d1608bf3..d62ab74da06 100644 --- a/store/postgres/src/primary.rs +++ b/store/postgres/src/primary.rs @@ -1765,10 +1765,10 @@ impl<'a> Connection<'a> { Ok(s::table .inner_join( - v::table.on(v::subgraph + v::table.on(v::id .nullable() .eq(s::current_version) - .or(v::subgraph.nullable().eq(s::pending_version))), + .or(v::id.nullable().eq(s::pending_version))), ) .filter(v::deployment.eq(site.deployment.as_str())) .select(s::name) diff --git a/store/postgres/src/relational.rs b/store/postgres/src/relational.rs index be9f889c84a..de7e6895083 100644 --- a/store/postgres/src/relational.rs +++ b/store/postgres/src/relational.rs @@ -28,6 +28,7 @@ use diesel::{connection::SimpleConnection, Connection}; use 
diesel::{ debug_query, sql_query, OptionalExtension, PgConnection, QueryDsl, QueryResult, RunQueryDsl, }; +use graph::blockchain::block_stream::{EntityOperationKind, EntitySourceOperation}; use graph::blockchain::BlockTime; use graph::cheap_clone::CheapClone; use graph::components::store::write::{RowGroup, WriteChunk}; @@ -50,20 +51,21 @@ use std::borrow::Borrow; use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet}; use std::convert::{From, TryFrom}; use std::fmt::{self, Write}; +use std::ops::Range; use std::str::FromStr; use std::sync::{Arc, Mutex}; use std::time::{Duration, Instant}; use crate::relational::value::{FromOidRow, OidRow}; use crate::relational_queries::{ - ConflictingEntitiesData, ConflictingEntitiesQuery, FindChangesQuery, FindDerivedQuery, - FindPossibleDeletionsQuery, ReturnedEntityData, + ConflictingEntitiesData, ConflictingEntitiesQuery, EntityDataExt, FindChangesQuery, + FindDerivedQuery, FindPossibleDeletionsQuery, ReturnedEntityData, }; use crate::{ primary::{Namespace, Site}, relational_queries::{ ClampRangeQuery, EntityData, EntityDeletion, FilterCollection, FilterQuery, FindManyQuery, - InsertQuery, RevertClampQuery, RevertRemoveQuery, + FindRangeQuery, InsertQuery, RevertClampQuery, RevertRemoveQuery, }, }; use graph::components::store::{AttributeNames, DerivedEntityQuery}; @@ -74,7 +76,7 @@ use graph::prelude::{ QueryExecutionError, StoreError, StoreEvent, ValueType, BLOCK_NUMBER_MAX, }; -use crate::block_range::{BLOCK_COLUMN, BLOCK_RANGE_COLUMN}; +use crate::block_range::{BoundSide, BLOCK_COLUMN, BLOCK_RANGE_COLUMN}; pub use crate::catalog::Catalog; use crate::connection_pool::ForeignServer; use crate::{catalog, deployment}; @@ -541,6 +543,143 @@ impl Layout { Ok(entities) } + pub fn find_range( + &self, + conn: &mut PgConnection, + entity_types: Vec, + causality_region: CausalityRegion, + block_range: Range, + ) -> Result>, StoreError> { + let mut tables = vec![]; + for et in entity_types { + 
tables.push(self.table_for_entity(&et)?.as_ref()); + } + let mut entities: BTreeMap> = BTreeMap::new(); + + // Collect all entities that have their 'lower(block_range)' attribute in the + // interval of blocks defined by the variable block_range. For the immutable + // entities the respective attribute is 'block$'. + // Here are all entities that are created or modified in the block_range. + let lower_vec = FindRangeQuery::new( + &tables, + causality_region, + BoundSide::Lower, + block_range.clone(), + ) + .get_results::(conn) + .optional()? + .unwrap_or_default(); + // Collect all entities that have their 'upper(block_range)' attribute in the + // interval of blocks defined by the variable block_range. For the immutable + // entities no entries are returned. + // Here are all entities that are modified or deleted in the block_range, + // but will have the previous versions, i.e. in the case of an update, it's + // the version before the update, and lower_vec will have a corresponding + // entry with the new version. + let upper_vec = + FindRangeQuery::new(&tables, causality_region, BoundSide::Upper, block_range) + .get_results::(conn) + .optional()? + .unwrap_or_default(); + let mut lower_iter = lower_vec.iter().fuse().peekable(); + let mut upper_iter = upper_vec.iter().fuse().peekable(); + let mut lower_now = lower_iter.next(); + let mut upper_now = upper_iter.next(); + // A closure to convert the entity data from the database into entity operation. 
+ let transform = |ede: &EntityDataExt, + entity_op: EntityOperationKind| + -> Result<(EntitySourceOperation, BlockNumber), StoreError> { + let e = EntityData::new(ede.entity.clone(), ede.data.clone()); + let block = ede.block_number; + let entity_type = e.entity_type(&self.input_schema); + let entity = e.deserialize_with_layout::(self, None)?; + let vid = ede.vid; + let ewt = EntitySourceOperation { + entity_op, + entity_type, + entity, + vid, + }; + Ok((ewt, block)) + }; + + fn compare_entity_data_ext(a: &EntityDataExt, b: &EntityDataExt) -> std::cmp::Ordering { + a.block_number + .cmp(&b.block_number) + .then_with(|| a.entity.cmp(&b.entity)) + .then_with(|| a.id.cmp(&b.id)) + } + + // The algorithm is a similar to merge sort algorithm and it relays on the fact that both vectors + // are ordered by (block_number, entity_type, entity_id). It advances simultaneously entities from + // both lower_vec and upper_vec and tries to match entities that have entries in both vectors for + // a particular block. The match is successful if an entry in one array has the same values in the + // other one for the number of the block, entity type and the entity id. The comparison operation + // over the EntityDataExt implements that check. If there is a match it’s a modification operation, + // since both sides of a range are present for that block, entity type and id. If one side of the + // range exists and the other is missing it is a creation or deletion depending on which side is + // present. For immutable entities the entries in upper_vec are missing, hence they are considered + // having a lower bound at particular block and upper bound at infinity. 
+ while lower_now.is_some() || upper_now.is_some() { + let (ewt, block) = match (lower_now, upper_now) { + (Some(lower), Some(upper)) => { + match compare_entity_data_ext(lower, upper) { + std::cmp::Ordering::Greater => { + // we have upper bound at this block, but no lower bounds at the same block so it's deletion + let (ewt, block) = transform(upper, EntityOperationKind::Delete)?; + // advance upper_vec pointer + upper_now = upper_iter.next(); + (ewt, block) + } + std::cmp::Ordering::Less => { + // we have lower bound at this block but no upper bound at the same block so its creation + let (ewt, block) = transform(lower, EntityOperationKind::Create)?; + // advance lower_vec pointer + lower_now = lower_iter.next(); + (ewt, block) + } + std::cmp::Ordering::Equal => { + let (ewt, block) = transform(lower, EntityOperationKind::Modify)?; + // advance both lower_vec and upper_vec pointers + lower_now = lower_iter.next(); + upper_now = upper_iter.next(); + (ewt, block) + } + } + } + (Some(lower), None) => { + // we have lower bound at this block but no upper bound at the same block so its creation + let (ewt, block) = transform(lower, EntityOperationKind::Create)?; + // advance lower_vec pointer + lower_now = lower_iter.next(); + (ewt, block) + } + (None, Some(upper)) => { + // we have upper bound at this block, but no lower bounds at all so it's deletion + let (ewt, block) = transform(upper, EntityOperationKind::Delete)?; + // advance upper_vec pointer + upper_now = upper_iter.next(); + (ewt, block) + } + _ => panic!("Imposible case to happen"), + }; + + match entities.get_mut(&block) { + Some(vec) => vec.push(ewt), + None => { + let _ = entities.insert(block, vec![ewt]); + } + }; + } + + // sort the elements in each blocks bucket by vid + for (_, vec) in &mut entities { + vec.sort_by(|a, b| a.vid.cmp(&b.vid)); + } + + Ok(entities) + } + pub fn find_derived( &self, conn: &mut PgConnection, diff --git a/store/postgres/src/relational/ddl.rs 
b/store/postgres/src/relational/ddl.rs index aa3aefd3561..40a02d6051e 100644 --- a/store/postgres/src/relational/ddl.rs +++ b/store/postgres/src/relational/ddl.rs @@ -116,12 +116,18 @@ impl Table { Ok(cols) } + let vid_type = if self.object.has_vid_seq() { + "bigint" + } else { + "bigserial" + }; + if self.immutable { writeln!( out, " create table {qname} ( - {vid} bigserial primary key, + {vid} {vid_type} primary key, {block} int not null,\n\ {cols}, unique({id}) @@ -129,6 +135,7 @@ impl Table { qname = self.qualified_name, cols = columns_ddl(self)?, vid = VID_COLUMN, + vid_type = vid_type, block = BLOCK_COLUMN, id = self.primary_key().name ) @@ -137,13 +144,14 @@ impl Table { out, r#" create table {qname} ( - {vid} bigserial primary key, + {vid} {vid_type} primary key, {block_range} int4range not null, {cols} );"#, qname = self.qualified_name, cols = columns_ddl(self)?, vid = VID_COLUMN, + vid_type = vid_type, block_range = BLOCK_RANGE_COLUMN )?; diff --git a/store/postgres/src/relational/ddl_tests.rs b/store/postgres/src/relational/ddl_tests.rs index b7f9b44afac..86e9f232d49 100644 --- a/store/postgres/src/relational/ddl_tests.rs +++ b/store/postgres/src/relational/ddl_tests.rs @@ -384,7 +384,7 @@ create type sgd0815."size" as enum ('large', 'medium', 'small'); create table "sgd0815"."thing" ( - vid bigserial primary key, + vid bigint primary key, block_range int4range not null, "id" text not null, "big_thing" text not null @@ -405,7 +405,7 @@ create index attr_0_1_thing_big_thing create table "sgd0815"."scalar" ( - vid bigserial primary key, + vid bigint primary key, block_range int4range not null, "id" text not null, "bool" boolean, @@ -444,7 +444,7 @@ create index attr_1_7_scalar_color create table "sgd0815"."file_thing" ( - vid bigserial primary key, + vid bigint primary key, block_range int4range not null, causality_region int not null, "id" text not null @@ -469,7 +469,7 @@ create type sgd0815."size" as enum ('large', 'medium', 'small'); create table 
"sgd0815"."thing" ( - vid bigserial primary key, + vid bigint primary key, block_range int4range not null, "id" text not null, "big_thing" text not null @@ -490,7 +490,7 @@ create index attr_0_1_thing_big_thing create table "sgd0815"."scalar" ( - vid bigserial primary key, + vid bigint primary key, block_range int4range not null, "id" text not null, "bool" boolean, @@ -515,7 +515,7 @@ create index attr_1_0_scalar_id create table "sgd0815"."file_thing" ( - vid bigserial primary key, + vid bigint primary key, block_range int4range not null, causality_region int not null, "id" text not null @@ -575,7 +575,7 @@ type SongStat @entity { played: Int! }"#; const MUSIC_DDL: &str = r#"create table "sgd0815"."musician" ( - vid bigserial primary key, + vid bigint primary key, block_range int4range not null, "id" text not null, "name" text not null, @@ -598,7 +598,7 @@ create index attr_0_2_musician_main_band on "sgd0815"."musician" using gist("main_band", block_range); create table "sgd0815"."band" ( - vid bigserial primary key, + vid bigint primary key, block_range int4range not null, "id" text not null, "name" text not null, @@ -618,8 +618,8 @@ create index attr_1_1_band_name on "sgd0815"."band" using btree(left("name", 256)); create table "sgd0815"."song" ( - vid bigserial primary key, - block$ int not null, + vid bigint primary key, + block$ int not null, "id" text not null, "title" text not null, "written_by" text not null, @@ -634,7 +634,7 @@ create index attr_2_1_song_written_by on "sgd0815"."song" using btree("written_by", block$); create table "sgd0815"."song_stat" ( - vid bigserial primary key, + vid bigint primary key, block_range int4range not null, "id" text not null, "played" int4 not null @@ -676,7 +676,7 @@ type Habitat @entity { }"#; const FOREST_DDL: &str = r#"create table "sgd0815"."animal" ( - vid bigserial primary key, + vid bigint primary key, block_range int4range not null, "id" text not null, "forest" text @@ -695,8 +695,8 @@ create index 
attr_0_1_animal_forest on "sgd0815"."animal" using gist("forest", block_range); create table "sgd0815"."forest" ( - vid bigserial primary key, - block_range int4range not null, + vid bigint primary key, + block_range int4range not null, "id" text not null ); alter table "sgd0815"."forest" @@ -711,7 +711,7 @@ create index attr_1_0_forest_id on "sgd0815"."forest" using btree("id"); create table "sgd0815"."habitat" ( - vid bigserial primary key, + vid bigint primary key, block_range int4range not null, "id" text not null, "most_common" text not null, @@ -763,7 +763,7 @@ type Habitat @entity { }"#; const FULLTEXT_DDL: &str = r#"create table "sgd0815"."animal" ( - vid bigserial primary key, + vid bigint primary key, block_range int4range not null, "id" text not null, "name" text not null, @@ -791,7 +791,7 @@ create index attr_0_4_animal_search on "sgd0815"."animal" using gin("search"); create table "sgd0815"."forest" ( - vid bigserial primary key, + vid bigint primary key, block_range int4range not null, "id" text not null ); @@ -808,7 +808,7 @@ create index attr_1_0_forest_id on "sgd0815"."forest" using btree("id"); create table "sgd0815"."habitat" ( - vid bigserial primary key, + vid bigint primary key, block_range int4range not null, "id" text not null, "most_common" text not null, @@ -843,7 +843,7 @@ enum Orientation { const FORWARD_ENUM_SQL: &str = r#"create type sgd0815."orientation" as enum ('DOWN', 'UP'); create table "sgd0815"."thing" ( - vid bigserial primary key, + vid bigint primary key, block_range int4range not null, "id" text not null, "orientation" "sgd0815"."orientation" not null @@ -880,8 +880,8 @@ type Stats @aggregation(intervals: ["hour", "day"], source: "Data") { const TS_SQL: &str = r#" create table "sgd0815"."data" ( - vid bigserial primary key, - block$ int not null, + vid bigint primary key, + block$ int not null, "id" int8 not null, "timestamp" timestamptz not null, "amount" numeric not null, @@ -896,7 +896,7 @@ create index 
attr_0_1_data_amount create table "sgd0815"."stats_hour" ( vid bigserial primary key, - block$ int not null, + block$ int not null, "id" int8 not null, "timestamp" timestamptz not null, "volume" numeric not null, @@ -971,9 +971,9 @@ const LIFETIME_GQL: &str = r#" const LIFETIME_SQL: &str = r#" create table "sgd0815"."data" ( - vid bigserial primary key, - block$ int not null, -"id" int8 not null, + vid bigint primary key, + block$ int not null, + "id" int8 not null, "timestamp" timestamptz not null, "group_1" int4 not null, "group_2" int4 not null, @@ -993,8 +993,8 @@ on "sgd0815"."data" using btree("amount"); create table "sgd0815"."stats_1_hour" ( vid bigserial primary key, - block$ int not null, -"id" int8 not null, + block$ int not null, + "id" int8 not null, "timestamp" timestamptz not null, "volume" numeric not null, unique(id) @@ -1009,8 +1009,8 @@ on "sgd0815"."stats_1_hour" using btree("volume"); create table "sgd0815"."stats_1_day" ( vid bigserial primary key, - block$ int not null, -"id" int8 not null, + block$ int not null, + "id" int8 not null, "timestamp" timestamptz not null, "volume" numeric not null, unique(id) @@ -1025,8 +1025,8 @@ on "sgd0815"."stats_1_day" using btree("volume"); create table "sgd0815"."stats_2_hour" ( vid bigserial primary key, - block$ int not null, -"id" int8 not null, + block$ int not null, + "id" int8 not null, "timestamp" timestamptz not null, "group_1" int4 not null, "volume" numeric not null, @@ -1045,8 +1045,8 @@ on "sgd0815"."stats_2_hour"(group_1, timestamp); create table "sgd0815"."stats_2_day" ( vid bigserial primary key, - block$ int not null, -"id" int8 not null, + block$ int not null, + "id" int8 not null, "timestamp" timestamptz not null, "group_1" int4 not null, "volume" numeric not null, @@ -1065,8 +1065,8 @@ on "sgd0815"."stats_2_day"(group_1, timestamp); create table "sgd0815"."stats_3_hour" ( vid bigserial primary key, - block$ int not null, -"id" int8 not null, + block$ int not null, + "id" int8 not null, 
"timestamp" timestamptz not null, "group_2" int4 not null, "group_1" int4 not null, @@ -1088,8 +1088,8 @@ on "sgd0815"."stats_3_hour"(group_2, group_1, timestamp); create table "sgd0815"."stats_3_day" ( vid bigserial primary key, - block$ int not null, -"id" int8 not null, + block$ int not null, + "id" int8 not null, "timestamp" timestamptz not null, "group_2" int4 not null, "group_1" int4 not null, diff --git a/store/postgres/src/relational/dsl.rs b/store/postgres/src/relational/dsl.rs index 6812bbb37e9..e804a4d06ca 100644 --- a/store/postgres/src/relational/dsl.rs +++ b/store/postgres/src/relational/dsl.rs @@ -22,7 +22,7 @@ use diesel::sql_types::{ use diesel::{AppearsOnTable, Expression, QueryDsl, QueryResult, SelectableExpression}; use diesel_dynamic_schema::DynamicSelectClause; use graph::components::store::{AttributeNames, BlockNumber, StoreError, BLOCK_NUMBER_MAX}; -use graph::data::store::{Id, IdType, ID}; +use graph::data::store::{Id, IdType, ID, VID}; use graph::data_source::CausalityRegion; use graph::prelude::{lazy_static, ENV_VARS}; @@ -254,11 +254,16 @@ impl<'a> Table<'a> { } match column_names { - AttributeNames::All => cols.extend(self.meta.columns.iter()), + AttributeNames::All => { + cols.extend(self.meta.columns.iter()); + } AttributeNames::Select(names) => { let pk = self.meta.primary_key(); cols.push(pk); - let mut names: Vec<_> = names.iter().filter(|name| *name != &*ID).collect(); + let mut names: Vec<_> = names + .iter() + .filter(|name| *name != &*ID && *name != &*VID) + .collect(); names.sort(); for name in names { let column = self.meta.column_for_field(&name)?; @@ -280,8 +285,9 @@ impl<'a> Table<'a> { } } + cols.push(&*VID_COL); + if T::WITH_SYSTEM_COLUMNS { - cols.push(&*VID_COL); if self.meta.immutable { cols.push(&*BLOCK_COL); } else { diff --git a/store/postgres/src/relational/prune.rs b/store/postgres/src/relational/prune.rs index 6b5fcdc6940..62632549397 100644 --- a/store/postgres/src/relational/prune.rs +++ 
b/store/postgres/src/relational/prune.rs @@ -1,4 +1,4 @@ -use std::{fmt::Write, sync::Arc, time::Instant}; +use std::{fmt::Write, sync::Arc}; use diesel::{ connection::SimpleConnection, @@ -18,51 +18,13 @@ use graph::{ use itertools::Itertools; use crate::{ - catalog, - copy::AdaptiveBatchSize, - deployment, + catalog, deployment, relational::{Table, VID_COLUMN}, + vid_batcher::{VidBatcher, VidRange}, }; use super::{Catalog, Layout, Namespace}; -// Additions to `Table` that are useful for pruning -impl Table { - /// Return the first and last vid of any entity that is visible in the - /// block range from `first_block` (inclusive) to `last_block` - /// (exclusive) - fn vid_range( - &self, - conn: &mut PgConnection, - first_block: BlockNumber, - last_block: BlockNumber, - ) -> Result<(i64, i64), StoreError> { - #[derive(QueryableByName)] - struct VidRange { - #[diesel(sql_type = BigInt)] - min_vid: i64, - #[diesel(sql_type = BigInt)] - max_vid: i64, - } - - // Determine the last vid that we need to copy - let VidRange { min_vid, max_vid } = sql_query(format!( - "/* controller=prune,first={first_block},last={last_block} */ \ - select coalesce(min(vid), 0) as min_vid, \ - coalesce(max(vid), -1) as max_vid from {src} \ - where lower(block_range) <= $2 \ - and coalesce(upper(block_range), 2147483647) > $1 \ - and coalesce(upper(block_range), 2147483647) <= $2 \ - and block_range && int4range($1, $2)", - src = self.qualified_name, - )) - .bind::(first_block) - .bind::(last_block) - .get_result::(conn)?; - Ok((min_vid, max_vid)) - } -} - /// Utility to copy relevant data out of a source table and into a new /// destination table and replace the source table with the destination /// table @@ -123,52 +85,48 @@ impl TablePair { let column_list = self.column_list(); // Determine the last vid that we need to copy - let (min_vid, max_vid) = self.src.vid_range(conn, earliest_block, final_block)?; - - let mut batch_size = AdaptiveBatchSize::new(&self.src); - // The first vid we 
still need to copy - let mut next_vid = min_vid; - while next_vid <= max_vid { - let start = Instant::now(); - let rows = conn.transaction(|conn| { - // Page through all rows in `src` in batches of `batch_size` - // and copy the ones that are visible to queries at block - // heights between `earliest_block` and `final_block`, but - // whose block_range does not extend past `final_block` - // since they could still be reverted while we copy. - // The conditions on `block_range` are expressed redundantly - // to make more indexes useable - sql_query(format!( - "/* controller=prune,phase=final,start_vid={next_vid},batch_size={batch_size} */ \ + let range = VidRange::for_prune(conn, &self.src, earliest_block, final_block)?; + let mut batcher = VidBatcher::load(conn, &self.src_nsp, &self.src, range)?; + + while !batcher.finished() { + let (_, rows) = batcher.step(|start, end| { + conn.transaction(|conn| { + // Page through all rows in `src` in batches of `batch_size` + // and copy the ones that are visible to queries at block + // heights between `earliest_block` and `final_block`, but + // whose block_range does not extend past `final_block` + // since they could still be reverted while we copy. 
+ // The conditions on `block_range` are expressed redundantly + // to make more indexes useable + sql_query(format!( + "/* controller=prune,phase=final,start_vid={start},batch_size={batch_size} */ \ insert into {dst}({column_list}) \ select {column_list} from {src} \ where lower(block_range) <= $2 \ and coalesce(upper(block_range), 2147483647) > $1 \ and coalesce(upper(block_range), 2147483647) <= $2 \ and block_range && int4range($1, $2, '[]') \ - and vid >= $3 and vid < $3 + $4 \ + and vid >= $3 and vid <= $4 \ order by vid", src = self.src.qualified_name, dst = self.dst.qualified_name, - batch_size = batch_size.size, + batch_size = end - start + 1, )) - .bind::(earliest_block) - .bind::(final_block) - .bind::(next_vid) - .bind::(&batch_size) - .execute(conn) + .bind::(earliest_block) + .bind::(final_block) + .bind::(start) + .bind::(end) + .execute(conn) + .map_err(StoreError::from) + }) })?; cancel.check_cancel()?; - next_vid += batch_size.size; - - batch_size.adapt(start.elapsed()); - reporter.prune_batch( self.src.name.as_str(), - rows, + rows.unwrap_or(0), PrunePhase::CopyFinal, - next_vid > max_vid, + batcher.finished(), ); } Ok(()) @@ -186,49 +144,42 @@ impl TablePair { let column_list = self.column_list(); // Determine the last vid that we need to copy - let (min_vid, max_vid) = self - .src - .vid_range(conn, final_block + 1, BLOCK_NUMBER_MAX)?; - - let mut batch_size = AdaptiveBatchSize::new(&self.src); - // The first vid we still need to copy - let mut next_vid = min_vid; - while next_vid <= max_vid { - let start = Instant::now(); - let rows = conn.transaction(|conn| { + let range = VidRange::for_prune(conn, &self.src, final_block + 1, BLOCK_NUMBER_MAX)?; + let mut batcher = VidBatcher::load(conn, &self.src.nsp, &self.src, range)?; + + while !batcher.finished() { + let (_, rows) = batcher.step(|start, end| { // Page through all the rows in `src` in batches of // `batch_size` that are visible to queries at block heights - // starting right after 
`final_block`. - // The conditions on `block_range` are expressed redundantly - // to make more indexes useable - sql_query(format!( - "/* controller=prune,phase=nonfinal,start_vid={next_vid},batch_size={batch_size} */ \ + // starting right after `final_block`. The conditions on + // `block_range` are expressed redundantly to make more + // indexes useable + conn.transaction(|conn| { + sql_query(format!( + "/* controller=prune,phase=nonfinal,start_vid={start},batch_size={batch_size} */ \ insert into {dst}({column_list}) \ select {column_list} from {src} \ where coalesce(upper(block_range), 2147483647) > $1 \ and block_range && int4range($1, null) \ - and vid >= $2 and vid < $2 + $3 \ + and vid >= $2 and vid <= $3 \ order by vid", - dst = self.dst.qualified_name, - src = self.src.qualified_name, - batch_size = batch_size.size - )) - .bind::(final_block) - .bind::(next_vid) - .bind::(&batch_size) - .execute(conn) - .map_err(StoreError::from) + dst = self.dst.qualified_name, + src = self.src.qualified_name, + batch_size = end - start + 1, + )) + .bind::(final_block) + .bind::(start) + .bind::(end) + .execute(conn) + .map_err(StoreError::from) + }) })?; - next_vid += batch_size.size; - - batch_size.adapt(start.elapsed()); - reporter.prune_batch( self.src.name.as_str(), - rows, + rows.unwrap_or(0), PrunePhase::CopyNonfinal, - next_vid > max_vid, + batcher.finished(), ); } Ok(()) @@ -252,12 +203,14 @@ impl TablePair { "src" => src_nsp.as_str(), "error" => e.to_string()); } - // Make sure the vid sequence - // continues from where it was - writeln!( - query, - "select setval('{dst_nsp}.{vid_seq}', nextval('{src_nsp}.{vid_seq}'));" - )?; + // Make sure the vid sequence continues from where it was in case + // that we use autoincrementing order of the DB + if !self.src.object.has_vid_seq() { + writeln!( + query, + "select setval('{dst_nsp}.{vid_seq}', nextval('{src_nsp}.{vid_seq}'));" + )?; + } writeln!(query, "drop table {src_qname};")?; writeln!(query, "alter table 
{dst_qname} set schema {src_nsp}")?; @@ -458,33 +411,28 @@ impl Layout { PruningStrategy::Delete => { // Delete all entity versions whose range was closed // before `req.earliest_block` - let (min_vid, max_vid) = table.vid_range(conn, 0, req.earliest_block)?; - let mut batch_size = AdaptiveBatchSize::new(&table); - let mut next_vid = min_vid; - while next_vid <= max_vid { - let start = Instant::now(); - let rows = sql_query(format!( - "/* controller=prune,phase=delete,start_vid={next_vid},batch_size={batch_size} */ \ + let range = VidRange::for_prune(conn, &table, 0, req.earliest_block)?; + let mut batcher = VidBatcher::load(conn, &self.site.namespace, &table, range)?; + + while !batcher.finished() { + let (_, rows) = batcher.step(|start, end| {sql_query(format!( + "/* controller=prune,phase=delete,start_vid={start},batch_size={batch_size} */ \ delete from {qname} \ where coalesce(upper(block_range), 2147483647) <= $1 \ - and vid >= $2 and vid < $2 + $3", + and vid >= $2 and vid <= $3", qname = table.qualified_name, - batch_size = batch_size.size + batch_size = end - start + 1 )) .bind::(req.earliest_block) - .bind::(next_vid) - .bind::(&batch_size) - .execute(conn)?; - - next_vid += batch_size.size; - - batch_size.adapt(start.elapsed()); + .bind::(start) + .bind::(end) + .execute(conn).map_err(StoreError::from)})?; reporter.prune_batch( table.name.as_str(), - rows as usize, + rows.unwrap_or(0), PrunePhase::Delete, - next_vid > max_vid, + batcher.finished(), ); } } diff --git a/store/postgres/src/relational_queries.rs b/store/postgres/src/relational_queries.rs index 56ad1aafacb..f4b55e89150 100644 --- a/store/postgres/src/relational_queries.rs +++ b/store/postgres/src/relational_queries.rs @@ -31,13 +31,15 @@ use std::collections::{BTreeMap, BTreeSet, HashSet}; use std::convert::TryFrom; use std::fmt::{self, Display}; use std::iter::FromIterator; +use std::ops::Range; use std::str::FromStr; use std::string::ToString; +use crate::block_range::{BoundSide, 
EntityBlockRange}; use crate::relational::dsl::AtBlock; use crate::relational::{ dsl, Column, ColumnType, Layout, SqlName, Table, BYTE_ARRAY_PREFIX_SIZE, PRIMARY_KEY_COLUMN, - STRING_PREFIX_SIZE, + STRING_PREFIX_SIZE, VID_COLUMN, }; use crate::{ block_range::{ @@ -442,7 +444,7 @@ pub fn parse_id(id_type: IdType, json: serde_json::Value) -> Result EntityData { + EntityData { entity, data } + } + pub fn entity_type(&self, schema: &InputSchema) -> EntityType { schema.entity_type(&self.entity).unwrap() } @@ -508,7 +514,14 @@ impl EntityData { // table column; those will be things like the // block_range that `select *` pulls in but that we // don't care about here - if let Some(column) = table.column(&SqlName::verbatim(key)) { + if key == VID_COLUMN { + // VID is not in the input schema but we need it, so deserialize it too + match T::Value::from_column_value(&ColumnType::Int8, json) { + Ok(value) if value.is_null() => None, + Ok(value) => Some(Ok((Word::from(VID_COLUMN), value))), + Err(e) => Some(Err(e)), + } + } else if let Some(column) = table.column(&SqlName::verbatim(key)) { match T::Value::from_column_value(&column.column_type, json) { Ok(value) if value.is_null() => None, Ok(value) => Some(Ok((Word::from(column.field.to_string()), value))), @@ -527,6 +540,20 @@ impl EntityData { } } +#[derive(QueryableByName, Clone, Debug, Default)] +pub struct EntityDataExt { + #[diesel(sql_type = Text)] + pub entity: String, + #[diesel(sql_type = Jsonb)] + pub data: serde_json::Value, + #[diesel(sql_type = Integer)] + pub block_number: i32, + #[diesel(sql_type = Binary)] + pub id: Vec, + #[diesel(sql_type = BigInt)] + pub vid: i64, +} + /// The equivalent of `graph::data::store::Value` but in a form that does /// not require further transformation during `walk_ast`. 
This form takes /// the idiosyncrasies of how we serialize values into account (e.g., that @@ -1802,6 +1829,104 @@ impl<'a> QueryFragment for Filter<'a> { } } +#[derive(Debug, Clone)] +pub struct FindRangeQuery<'a> { + tables: &'a Vec<&'a Table>, + causality_region: CausalityRegion, + bound_side: BoundSide, + imm_range: EntityBlockRange, + mut_range: EntityBlockRange, +} + +impl<'a> FindRangeQuery<'a> { + pub fn new( + tables: &'a Vec<&Table>, + causality_region: CausalityRegion, + bound_side: BoundSide, + block_range: Range, + ) -> Self { + let imm_range = EntityBlockRange::new(true, block_range.clone(), bound_side); + let mut_range = EntityBlockRange::new(false, block_range, bound_side); + Self { + tables, + causality_region, + bound_side, + imm_range, + mut_range, + } + } +} + +impl<'a> QueryFragment for FindRangeQuery<'a> { + fn walk_ast<'b>(&'b self, mut out: AstPass<'_, 'b, Pg>) -> QueryResult<()> { + out.unsafe_to_cache_prepared(); + let mut first = true; + + for table in self.tables.iter() { + // the immutable entities don't have upper range and also can't be modified or deleted + if matches!(self.bound_side, BoundSide::Lower) || !table.immutable { + if first { + first = false; + } else { + out.push_sql("\nunion all\n"); + } + + // Generate + // select '..' as entity, to_jsonb(e.*) as data, {BLOCK_STATEMENT} as block_number + // from schema.table e where ... + // Here the {BLOCK_STATEMENT} is 'block$' for immutable tables and either 'lower(block_range)' + // or 'upper(block_range)' depending on the bound_side variable. 
+ out.push_sql("select "); + out.push_bind_param::(table.object.as_str())?; + out.push_sql(" as entity, to_jsonb(e.*) as data,"); + if table.immutable { + self.imm_range.compare_column(&mut out) + } else { + self.mut_range.compare_column(&mut out) + } + out.push_sql("as block_number, id, vid\n"); + out.push_sql(" from "); + out.push_sql(table.qualified_name.as_str()); + out.push_sql(" e\n where"); + // add causality region to the query + if table.has_causality_region { + out.push_sql("causality_region = "); + out.push_bind_param::(&self.causality_region)?; + out.push_sql(" and "); + } + if table.immutable { + self.imm_range.contains(&mut out)?; + } else { + self.mut_range.contains(&mut out)?; + } + } + } + + if first { + // In case we have only immutable entities, the upper range will not create any + // select statement. So here we have to generate an SQL statement that returns + // an empty result. + out.push_sql("select 'dummy_entity' as entity, to_jsonb(1) as data, 1 as block_number, 1 as id, 1 as vid where false"); + } else { + out.push_sql("\norder by block_number, entity, id"); + } + + Ok(()) + } +} + +impl<'a> QueryId for FindRangeQuery<'a> { + type QueryId = (); + + const HAS_STATIC_QUERY_ID: bool = false; +} + +impl<'a> Query for FindRangeQuery<'a> { + type SqlType = Untyped; +} + +impl<'a, Conn> RunQueryDsl for FindRangeQuery<'a> {} + +/// Builds a query over a given set of [`Table`]s in an attempt to find updated +/// and/or newly inserted entities at a given block number; i.e. such that the +/// block range's lower bound is equal to said block number. 
@@ -2095,6 +2220,7 @@ struct InsertRow<'a> { values: Vec>, br_value: BlockRangeValue, causality_region: CausalityRegion, + vid: i64, } impl<'a> InsertRow<'a> { @@ -2131,10 +2257,12 @@ impl<'a> InsertRow<'a> { } let br_value = BlockRangeValue::new(table, row.block, row.end); let causality_region = row.causality_region; + let vid = row.entity.vid(); Ok(Self { values, br_value, causality_region, + vid, }) } } @@ -2198,7 +2326,14 @@ impl<'a> InsertQuery<'a> { /// query, and depends on what columns `table` has and how they get put /// into the query pub fn chunk_size(table: &Table) -> usize { - let mut count = 1; + // We always have one column for the block number/range + let mut count = 1 + ENV_VARS.store.insert_extra_cols; + if table.has_causality_region { + count += 1; + } + if table.object.has_vid_seq() { + count += 1; + } for column in table.columns.iter() { // This code depends closely on how `walk_ast` and `QueryValue` // put values into bind variables @@ -2220,6 +2355,8 @@ impl<'a> QueryFragment for InsertQuery<'a> { let out = &mut out; out.unsafe_to_cache_prepared(); + let has_vid_seq = self.table.object.has_vid_seq(); + // Construct a query // insert into schema.table(column, ...) 
// values @@ -2245,6 +2382,9 @@ impl<'a> QueryFragment for InsertQuery<'a> { out.push_sql(CAUSALITY_REGION_COLUMN); }; + if has_vid_seq { + out.push_sql(", vid"); + } out.push_sql(") values\n"); for (i, row) in self.rows.iter().enumerate() { @@ -2262,6 +2402,10 @@ impl<'a> QueryFragment for InsertQuery<'a> { out.push_sql(", "); out.push_bind_param::(&row.causality_region)?; }; + if has_vid_seq { + out.push_sql(", "); + out.push_bind_param::(&row.vid)?; + } out.push_sql(")"); } @@ -4661,6 +4805,8 @@ impl<'a> QueryFragment for CopyEntityBatchQuery<'a> { fn walk_ast<'b>(&'b self, mut out: AstPass<'_, 'b, Pg>) -> QueryResult<()> { out.unsafe_to_cache_prepared(); + let has_vid_seq = self.src.object.has_vid_seq(); + // Construct a query // insert into {dst}({columns}) // select {columns} from {src} @@ -4681,6 +4827,9 @@ impl<'a> QueryFragment for CopyEntityBatchQuery<'a> { out.push_sql(", "); out.push_sql(CAUSALITY_REGION_COLUMN); }; + if has_vid_seq { + out.push_sql(", vid"); + } out.push_sql(")\nselect "); for column in &self.columns { @@ -4746,6 +4895,10 @@ impl<'a> QueryFragment for CopyEntityBatchQuery<'a> { )); } } + if has_vid_seq { + out.push_sql(", vid"); + } + out.push_sql(" from "); out.push_sql(self.src.qualified_name.as_str()); out.push_sql(" where vid >= "); diff --git a/store/postgres/src/subgraph_store.rs b/store/postgres/src/subgraph_store.rs index 41cbef15982..f6544a79e0d 100644 --- a/store/postgres/src/subgraph_store.rs +++ b/store/postgres/src/subgraph_store.rs @@ -44,7 +44,7 @@ use crate::{ index::{IndexList, Method}, Layout, }, - writable::WritableStore, + writable::{SourceableStore, WritableStore}, NotificationSender, }; use crate::{ @@ -268,6 +268,50 @@ impl SubgraphStore { pub fn for_site(&self, site: &Site) -> Result<&Arc, StoreError> { self.inner.for_site(site) } + + async fn get_or_create_writable_store( + self: Arc, + logger: Logger, + deployment: graph::components::store::DeploymentId, + manifest_idx_and_name: Arc>, + ) -> Result, 
StoreError> { + let deployment = deployment.into(); + // We cache writables to make sure calls to this method are + // idempotent and there is ever only one `WritableStore` for any + // deployment + if let Some(writable) = self.writables.lock().unwrap().get(&deployment) { + // A poisoned writable will not write anything anymore; we + // discard it and create a new one that is properly initialized + // according to the state in the database. + if !writable.poisoned() { + return Ok(writable.cheap_clone()); + } + } + + // Ideally the lower level functions would be asyncified. + let this = self.clone(); + let site = graph::spawn_blocking_allow_panic(move || -> Result<_, StoreError> { + this.find_site(deployment) + }) + .await + .unwrap()?; // Propagate panics, there shouldn't be any. + + let writable = Arc::new( + WritableStore::new( + self.as_ref().clone(), + logger, + site, + manifest_idx_and_name, + self.registry.clone(), + ) + .await?, + ); + self.writables + .lock() + .unwrap() + .insert(deployment, writable.cheap_clone()); + Ok(writable) + } } impl std::ops::Deref for SubgraphStore { @@ -1488,42 +1532,25 @@ impl SubgraphStoreTrait for SubgraphStore { deployment: graph::components::store::DeploymentId, manifest_idx_and_name: Arc>, ) -> Result, StoreError> { - let deployment = deployment.into(); - // We cache writables to make sure calls to this method are - // idempotent and there is ever only one `WritableStore` for any - // deployment - if let Some(writable) = self.writables.lock().unwrap().get(&deployment) { - // A poisoned writable will not write anything anymore; we - // discard it and create a new one that is properly initialized - // according to the state in the database. - if !writable.poisoned() { - return Ok(writable.cheap_clone()); - } - } + self.get_or_create_writable_store(logger, deployment, manifest_idx_and_name) + .await + .map(|store| store as Arc) + } - // Ideally the lower level functions would be asyncified. 
- let this = self.clone(); - let site = graph::spawn_blocking_allow_panic(move || -> Result<_, StoreError> { - this.find_site(deployment) - }) - .await - .unwrap()?; // Propagate panics, there shouldn't be any. + async fn sourceable( + self: Arc, + deployment: graph::components::store::DeploymentId, + ) -> Result, StoreError> { + let deployment = deployment.into(); + let site = self.find_site(deployment)?; + let store = self.for_site(&site)?; + let input_schema = self.input_schema(&site.deployment)?; - let writable = Arc::new( - WritableStore::new( - self.as_ref().clone(), - logger, - site, - manifest_idx_and_name, - self.registry.clone(), - ) - .await?, - ); - self.writables - .lock() - .unwrap() - .insert(deployment, writable.cheap_clone()); - Ok(writable) + Ok(Arc::new(SourceableStore::new( + site, + store.clone(), + input_schema, + ))) } async fn stop_subgraph(&self, loc: &DeploymentLocator) -> Result<(), StoreError> { diff --git a/store/postgres/src/vid_batcher.rs b/store/postgres/src/vid_batcher.rs new file mode 100644 index 00000000000..81da5382e3d --- /dev/null +++ b/store/postgres/src/vid_batcher.rs @@ -0,0 +1,425 @@ +use std::time::{Duration, Instant}; + +use diesel::{ + sql_query, + sql_types::{BigInt, Integer}, + PgConnection, RunQueryDsl as _, +}; +use graph::{ + env::ENV_VARS, + prelude::{BlockNumber, BlockPtr, StoreError}, + util::ogive::Ogive, +}; + +use crate::{ + catalog, + primary::Namespace, + relational::{Table, VID_COLUMN}, +}; + +/// The initial batch size for tables that do not have an array column +const INITIAL_BATCH_SIZE: i64 = 10_000; +/// The initial batch size for tables that do have an array column; those +/// arrays can be large and large arrays will slow down copying a lot. 
We +/// therefore tread lightly in that case +const INITIAL_BATCH_SIZE_LIST: i64 = 100; + +/// Track the desired size of a batch in such a way that doing the next +/// batch gets close to TARGET_DURATION for the time it takes to copy one +/// batch, but don't step up the size by more than 2x at once +#[derive(Debug, Queryable)] +pub(crate) struct AdaptiveBatchSize { + pub size: i64, + pub target: Duration, +} + +impl AdaptiveBatchSize { + pub fn new(table: &Table) -> Self { + let size = if table.columns.iter().any(|col| col.is_list()) { + INITIAL_BATCH_SIZE_LIST + } else { + INITIAL_BATCH_SIZE + }; + + Self { + size, + target: ENV_VARS.store.batch_target_duration, + } + } + + // adjust batch size by trying to extrapolate in such a way that we + // get close to TARGET_DURATION for the time it takes to copy one + // batch, but don't step up batch_size by more than 2x at once + pub fn adapt(&mut self, duration: Duration) -> i64 { + // Avoid division by zero + let duration = duration.as_millis().max(1); + let new_batch_size = self.size as f64 * self.target.as_millis() as f64 / duration as f64; + self.size = (2 * self.size).min(new_batch_size.round() as i64); + self.size + } +} + +/// A timer that works like `std::time::Instant` in non-test code, but +/// returns a fake elapsed value in tests +struct Timer { + start: Instant, + #[cfg(test)] + duration: Duration, +} + +impl Timer { + fn new() -> Self { + Self { + start: Instant::now(), + #[cfg(test)] + duration: Duration::from_secs(0), + } + } + + fn start(&mut self) { + self.start = Instant::now(); + } + + #[cfg(test)] + fn elapsed(&self) -> Duration { + self.duration + } + + #[cfg(not(test))] + fn elapsed(&self) -> Duration { + self.start.elapsed() + } + + #[cfg(test)] + fn set(&mut self, duration: Duration) { + self.duration = duration; + } +} + +/// A batcher for moving through a large range of `vid` values in a way such +/// that each batch takes approximately the same amount of time. 
The batcher +/// takes uneven distributions of `vid` values into account by using the +/// histogram from `pg_stats` for the table through which we are iterating. +pub(crate) struct VidBatcher { + batch_size: AdaptiveBatchSize, + start: i64, + end: i64, + max_vid: i64, + + ogive: Option, + + step_timer: Timer, +} + +impl VidBatcher { + fn histogram_bounds( + conn: &mut PgConnection, + nsp: &Namespace, + table: &Table, + range: VidRange, + ) -> Result, StoreError> { + let bounds = catalog::histogram_bounds(conn, nsp, &table.name, VID_COLUMN)? + .into_iter() + .filter(|bound| range.min < *bound && range.max > *bound) + .chain(vec![range.min, range.max].into_iter()) + .collect::>(); + Ok(bounds) + } + + /// Initialize a batcher for batching through entries in `table` with + /// `vid` in the given `vid_range` + /// + /// The `vid_range` is inclusive, i.e., the batcher will iterate over + /// all vids `vid_range.0 <= vid <= vid_range.1`; for an empty table, + /// the `vid_range` must be set to `(-1, 0)` + pub fn load( + conn: &mut PgConnection, + nsp: &Namespace, + table: &Table, + vid_range: VidRange, + ) -> Result { + let bounds = Self::histogram_bounds(conn, nsp, table, vid_range)?; + let batch_size = AdaptiveBatchSize::new(table); + Self::new(bounds, vid_range, batch_size) + } + + fn new( + bounds: Vec, + range: VidRange, + batch_size: AdaptiveBatchSize, + ) -> Result { + let start = range.min; + + let mut ogive = if range.is_empty() { + None + } else { + Some(Ogive::from_equi_histogram(bounds, range.size())?) 
+ }; + let end = match ogive.as_mut() { + None => start + batch_size.size, + Some(ogive) => ogive.next_point(start, batch_size.size as usize)?, + }; + + Ok(Self { + batch_size, + start, + end, + max_vid: range.max, + ogive, + step_timer: Timer::new(), + }) + } + + /// Explicitly set the batch size + pub fn with_batch_size(mut self: VidBatcher, size: usize) -> Self { + self.batch_size.size = size as i64; + self + } + + pub(crate) fn next_vid(&self) -> i64 { + self.start + } + + pub(crate) fn target_vid(&self) -> i64 { + self.max_vid + } + + pub fn batch_size(&self) -> usize { + self.batch_size.size as usize + } + + pub fn finished(&self) -> bool { + self.start > self.max_vid + } + + /// Perform the work for one batch. The function `f` is called with the + /// start and end `vid` for this batch and should perform all the work + /// for rows with `start <= vid <= end`, i.e. the start and end values + /// are inclusive. + /// + /// Once `f` returns, the batch size will be adjusted so that the time + /// the next batch will take is close to the target duration. + /// + /// The function returns the time it took to process the batch and the + /// result of `f`. If the batcher is finished, `f` will not be called, + /// and `None` will be returned as its result. 
+ pub fn step(&mut self, mut f: F) -> Result<(Duration, Option), StoreError> + where + F: FnMut(i64, i64) -> Result, + { + if self.finished() { + return Ok((Duration::from_secs(0), None)); + } + + match self.ogive.as_mut() { + None => Ok((Duration::from_secs(0), None)), + Some(ogive) => { + self.step_timer.start(); + + let res = f(self.start, self.end)?; + let duration = self.step_timer.elapsed(); + + let batch_size = self.batch_size.adapt(duration); + self.start = self.end + 1; + self.end = ogive.next_point(self.start, batch_size as usize)?; + + Ok((duration, Some(res))) + } + } + } +} + +#[derive(Copy, Clone, QueryableByName)] +pub(crate) struct VidRange { + #[diesel(sql_type = BigInt, column_name = "min_vid")] + pub min: i64, + #[diesel(sql_type = BigInt, column_name = "max_vid")] + pub max: i64, +} + +const EMPTY_VID_RANGE: VidRange = VidRange { max: -1, min: 0 }; + +impl VidRange { + pub fn new(min_vid: i64, max_vid: i64) -> Self { + Self { + min: min_vid, + max: max_vid, + } + } + + pub fn is_empty(&self) -> bool { + self.max == -1 + } + + pub fn size(&self) -> usize { + (self.max - self.min) as usize + 1 + } + + /// Return the full range of `vid` values in the table `src` + pub fn for_copy( + conn: &mut PgConnection, + src: &Table, + target_block: &BlockPtr, + ) -> Result { + let max_block_clause = if src.immutable { + "block$ <= $1" + } else { + "lower(block_range) <= $1" + }; + let vid_range = sql_query(format!( + "/* controller=copy,target={target_number} */ \ + select coalesce(min(vid), 0) as min_vid, \ + coalesce(max(vid), -1) as max_vid \ + from {src_name} where {max_block_clause}", + target_number = target_block.number, + src_name = src.qualified_name.as_str(), + max_block_clause = max_block_clause + )) + .bind::(&target_block.number) + .load::(conn)? 
+ .pop() + .unwrap_or(EMPTY_VID_RANGE); + Ok(vid_range) + } + + /// Return the first and last vid of any entity that is visible in the + /// block range from `first_block` (inclusive) to `last_block` + /// (exclusive) + pub fn for_prune( + conn: &mut PgConnection, + src: &Table, + first_block: BlockNumber, + last_block: BlockNumber, + ) -> Result { + sql_query(format!( + "/* controller=prune,first={first_block},last={last_block} */ \ + select coalesce(min(vid), 0) as min_vid, \ + coalesce(max(vid), -1) as max_vid from {src} \ + where lower(block_range) <= $2 \ + and coalesce(upper(block_range), 2147483647) > $1 \ + and coalesce(upper(block_range), 2147483647) <= $2 \ + and block_range && int4range($1, $2)", + src = src.qualified_name, + )) + .bind::(first_block) + .bind::(last_block) + .get_result::(conn) + .map_err(StoreError::from) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + const S001: Duration = Duration::from_secs(1); + const S010: Duration = Duration::from_secs(10); + const S050: Duration = Duration::from_secs(50); + const S100: Duration = Duration::from_secs(100); + const S200: Duration = Duration::from_secs(200); + + struct Batcher { + vid: VidBatcher, + } + + impl Batcher { + fn new(bounds: Vec, size: i64) -> Self { + let batch_size = AdaptiveBatchSize { size, target: S100 }; + let vid_range = VidRange::new(bounds[0], *bounds.last().unwrap()); + Self { + vid: VidBatcher::new(bounds, vid_range, batch_size).unwrap(), + } + } + + #[track_caller] + fn at(&self, start: i64, end: i64, size: i64) { + assert_eq!(self.vid.start, start, "at start"); + assert_eq!(self.vid.end, end, "at end"); + assert_eq!(self.vid.batch_size.size, size, "at size"); + } + + #[track_caller] + fn step(&mut self, start: i64, end: i64, duration: Duration) { + self.vid.step_timer.set(duration); + + match self.vid.step(|s, e| Ok((s, e))).unwrap() { + (d, Some((s, e))) => { + // Failing here indicates that our clever Timer is misbehaving + assert_eq!(d, duration, "step 
duration"); + assert_eq!(s, start, "step start"); + assert_eq!(e, end, "step end"); + } + (_, None) => { + if start > end { + // Expected, the batcher is exhausted + return; + } else { + panic!("step didn't return start and end") + } + } + } + } + + #[track_caller] + fn run(&mut self, start: i64, end: i64, size: i64, duration: Duration) { + self.at(start, end, size); + self.step(start, end, duration); + } + + fn finished(&self) -> bool { + self.vid.finished() + } + } + + #[test] + fn simple() { + let bounds = vec![10, 20, 30, 40, 49]; + let mut batcher = Batcher::new(bounds, 5); + + batcher.at(10, 15, 5); + + batcher.step(10, 15, S001); + batcher.at(16, 26, 10); + + batcher.step(16, 26, S001); + batcher.at(27, 46, 20); + assert!(!batcher.finished()); + + batcher.step(27, 46, S001); + batcher.at(47, 49, 40); + assert!(!batcher.finished()); + + batcher.step(47, 49, S001); + assert!(batcher.finished()); + batcher.at(50, 49, 80); + } + + #[test] + fn non_uniform() { + // A distribution that is flat in the beginning and then steeper and + // linear towards the end. 
The easiest way to see this is to graph + // `(bounds[i], i*40)` + let bounds = vec![40, 180, 260, 300, 320, 330, 340, 350, 359]; + let mut batcher = Batcher::new(bounds, 10); + + // The schedule of how we move through the bounds above in batches, + // with varying timings for each batch + batcher.run(040, 075, 10, S010); + batcher.run(076, 145, 20, S010); + batcher.run(146, 240, 40, S200); + batcher.run(241, 270, 20, S200); + batcher.run(271, 281, 10, S200); + batcher.run(282, 287, 05, S050); + batcher.run(288, 298, 10, S050); + batcher.run(299, 309, 20, S050); + batcher.run(310, 325, 40, S100); + batcher.run(326, 336, 40, S100); + batcher.run(337, 347, 40, S100); + batcher.run(348, 357, 40, S100); + batcher.run(358, 359, 40, S010); + assert!(batcher.finished()); + + batcher.at(360, 359, 80); + batcher.step(360, 359, S010); + } +} diff --git a/store/postgres/src/writable.rs b/store/postgres/src/writable.rs index 99ccfd02217..a9525ba9eb5 100644 --- a/store/postgres/src/writable.rs +++ b/store/postgres/src/writable.rs @@ -1,11 +1,12 @@ use std::collections::BTreeSet; -use std::ops::Deref; +use std::ops::{Deref, Range}; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::{Mutex, RwLock, TryLockError as RwLockError}; use std::time::Instant; use std::{collections::BTreeMap, sync::Arc}; -use graph::blockchain::block_stream::FirehoseCursor; +use async_trait::async_trait; +use graph::blockchain::block_stream::{EntitySourceOperation, FirehoseCursor}; use graph::blockchain::BlockTime; use graph::components::store::{Batch, DeploymentCursorTracker, DerivedEntityQuery, ReadStore}; use graph::constraint_violation; @@ -1571,6 +1572,47 @@ impl ReadStore for WritableStore { } } +pub struct SourceableStore { + site: Arc, + store: Arc, + input_schema: InputSchema, +} + +impl SourceableStore { + pub fn new(site: Arc, store: Arc, input_schema: InputSchema) -> Self { + Self { + site, + store, + input_schema, + } + } +} + +#[async_trait] +impl store::SourceableStore for 
SourceableStore { + fn get_range( + &self, + entity_types: Vec, + causality_region: CausalityRegion, + block_range: Range, + ) -> Result>, StoreError> { + self.store.get_range( + self.site.clone(), + entity_types, + causality_region, + block_range, + ) + } + + fn input_schema(&self) -> InputSchema { + self.input_schema.cheap_clone() + } + + async fn block_ptr(&self) -> Result, StoreError> { + self.store.block_ptr(self.site.cheap_clone()).await + } +} + impl DeploymentCursorTracker for WritableStore { fn block_ptr(&self) -> Option { self.block_ptr.lock().unwrap().clone() diff --git a/store/test-store/src/store.rs b/store/test-store/src/store.rs index afb088f6bf6..70fc26a3dde 100644 --- a/store/test-store/src/store.rs +++ b/store/test-store/src/store.rs @@ -163,7 +163,7 @@ pub async fn create_subgraph( let manifest = SubgraphManifest:: { id: subgraph_id.clone(), - spec_version: Version::new(1, 0, 0), + spec_version: Version::new(1, 3, 0), features: BTreeSet::new(), description: Some(format!("manifest for {}", subgraph_id)), repository: Some(format!("repo for {}", subgraph_id)), @@ -227,7 +227,7 @@ pub async fn create_test_subgraph_with_features( let manifest = SubgraphManifest:: { id: subgraph_id.clone(), - spec_version: Version::new(1, 0, 0), + spec_version: Version::new(1, 3, 0), features, description: Some(format!("manifest for {}", subgraph_id)), repository: Some(format!("repo for {}", subgraph_id)), @@ -422,12 +422,13 @@ pub async fn insert_entities( deployment: &DeploymentLocator, entities: Vec<(EntityType, Entity)>, ) -> Result<(), StoreError> { - let insert_ops = entities - .into_iter() - .map(|(entity_type, data)| EntityOperation::Set { + let insert_ops = entities.into_iter().map(|(entity_type, mut data)| { + data.set_vid_if_empty(); + EntityOperation::Set { key: entity_type.key(data.id()), data, - }); + } + }); transact_entity_operations( &SUBGRAPH_STORE, diff --git a/store/test-store/tests/chain/ethereum/manifest.rs 
b/store/test-store/tests/chain/ethereum/manifest.rs index 9089ec4f572..9d094ae5817 100644 --- a/store/test-store/tests/chain/ethereum/manifest.rs +++ b/store/test-store/tests/chain/ethereum/manifest.rs @@ -11,16 +11,17 @@ use graph::data::store::Value; use graph::data::subgraph::schema::SubgraphError; use graph::data::subgraph::{ Prune, LATEST_VERSION, SPEC_VERSION_0_0_4, SPEC_VERSION_0_0_7, SPEC_VERSION_0_0_8, - SPEC_VERSION_0_0_9, SPEC_VERSION_1_0_0, SPEC_VERSION_1_2_0, + SPEC_VERSION_0_0_9, SPEC_VERSION_1_0_0, SPEC_VERSION_1_2_0, SPEC_VERSION_1_3_0, }; use graph::data_source::offchain::OffchainDataSourceKind; -use graph::data_source::DataSourceTemplate; +use graph::data_source::{DataSourceEnum, DataSourceTemplate}; use graph::entity; use graph::env::ENV_VARS; use graph::prelude::web3::types::H256; use graph::prelude::{ anyhow, async_trait, serde_yaml, tokio, BigDecimal, BigInt, DeploymentHash, Link, Logger, - SubgraphManifest, SubgraphManifestValidationError, SubgraphStore, UnvalidatedSubgraphManifest, + SubgraphManifest, SubgraphManifestResolveError, SubgraphManifestValidationError, SubgraphStore, + UnvalidatedSubgraphManifest, }; use graph::{ blockchain::NodeCapabilities as _, @@ -37,6 +38,32 @@ const GQL_SCHEMA: &str = r#" type TestEntity @entity { id: ID! } "#; const GQL_SCHEMA_FULLTEXT: &str = include_str!("full-text.graphql"); +const SOURCE_SUBGRAPH_MANIFEST: &str = " +dataSources: [] +schema: + file: + /: /ipfs/QmSourceSchema +specVersion: 1.3.0 +"; + +const SOURCE_SUBGRAPH_SCHEMA: &str = " +type TestEntity @entity { id: ID! } +type User @entity { id: ID! } +type Profile @entity { id: ID! } + +type TokenData @entity(timeseries: true) { + id: Int8! + timestamp: Timestamp! + amount: BigDecimal! +} + +type TokenStats @aggregation(intervals: [\"hour\", \"day\"], source: \"TokenData\") { + id: Int8! + timestamp: Timestamp! + totalAmount: BigDecimal! 
@aggregate(fn: \"sum\", arg: \"amount\") +} +"; + const MAPPING_WITH_IPFS_FUNC_WASM: &[u8] = include_bytes!("ipfs-on-ethereum-contracts.wasm"); const ABI: &str = "[{\"type\":\"function\", \"inputs\": [{\"name\": \"i\",\"type\": \"uint256\"}],\"name\":\"get\",\"outputs\": [{\"type\": \"address\",\"name\": \"o\"}]}]"; const FILE: &str = "{}"; @@ -83,10 +110,10 @@ impl LinkResolverTrait for TextResolver { } } -async fn resolve_manifest( +async fn try_resolve_manifest( text: &str, max_spec_version: Version, -) -> SubgraphManifest { +) -> Result, anyhow::Error> { let mut resolver = TextResolver::default(); let id = DeploymentHash::new("Qmmanifest").unwrap(); @@ -94,12 +121,22 @@ async fn resolve_manifest( resolver.add("/ipfs/Qmschema", &GQL_SCHEMA); resolver.add("/ipfs/Qmabi", &ABI); resolver.add("/ipfs/Qmmapping", &MAPPING_WITH_IPFS_FUNC_WASM); + resolver.add("/ipfs/QmSource", &SOURCE_SUBGRAPH_MANIFEST); + resolver.add("/ipfs/QmSource2", &SOURCE_SUBGRAPH_MANIFEST); + resolver.add("/ipfs/QmSourceSchema", &SOURCE_SUBGRAPH_SCHEMA); resolver.add(FILE_CID, &FILE); let resolver: Arc = Arc::new(resolver); - let raw = serde_yaml::from_str(text).unwrap(); - SubgraphManifest::resolve_from_raw(id, raw, &resolver, &LOGGER, max_spec_version) + let raw = serde_yaml::from_str(text)?; + Ok(SubgraphManifest::resolve_from_raw(id, raw, &resolver, &LOGGER, max_spec_version).await?) 
+} + +async fn resolve_manifest( + text: &str, + max_spec_version: Version, +) -> SubgraphManifest { + try_resolve_manifest(text, max_spec_version) .await .expect("Parsing simple manifest works") } @@ -166,10 +203,162 @@ specVersion: 0.0.7 let data_source = match &manifest.templates[0] { DataSourceTemplate::Offchain(ds) => ds, DataSourceTemplate::Onchain(_) => unreachable!(), + DataSourceTemplate::Subgraph(_) => unreachable!(), }; assert_eq!(data_source.kind, OffchainDataSourceKind::Ipfs); } +#[tokio::test] +async fn subgraph_ds_manifest() { + let yaml = " +schema: + file: + /: /ipfs/Qmschema +dataSources: + - name: SubgraphSource + kind: subgraph + entities: + - Gravatar + network: mainnet + source: + address: 'QmSource' + startBlock: 9562480 + mapping: + apiVersion: 0.0.6 + language: wasm/assemblyscript + entities: + - TestEntity + file: + /: /ipfs/Qmmapping + handlers: + - handler: handleEntity + entity: TestEntity +specVersion: 1.3.0 +"; + + let manifest = resolve_manifest(yaml, SPEC_VERSION_1_3_0).await; + + assert_eq!("Qmmanifest", manifest.id.as_str()); + assert_eq!(manifest.data_sources.len(), 1); + let data_source = &manifest.data_sources[0]; + match data_source { + DataSourceEnum::Subgraph(ds) => { + assert_eq!(ds.name, "SubgraphSource"); + assert_eq!(ds.kind, "subgraph"); + assert_eq!(ds.source.start_block, 9562480); + } + _ => panic!("Expected a subgraph data source"), + } +} + +#[tokio::test] +async fn subgraph_ds_manifest_aggregations_should_fail() { + let yaml = " +schema: + file: + /: /ipfs/Qmschema +dataSources: + - name: SubgraphSource + kind: subgraph + entities: + - Gravatar + network: mainnet + source: + address: 'QmSource' + startBlock: 9562480 + mapping: + apiVersion: 0.0.6 + language: wasm/assemblyscript + entities: + - TestEntity + file: + /: /ipfs/Qmmapping + handlers: + - handler: handleEntity + entity: TokenStats # This is an aggregation and should fail +specVersion: 1.3.0 +"; + + let result = try_resolve_manifest(yaml, 
SPEC_VERSION_1_3_0).await; + assert!(result.is_err()); + let err = result.unwrap_err(); + assert!(err + .to_string() + .contains("Entity TokenStats is an aggregation and cannot be used as a mapping entity")); +} + +#[tokio::test] +async fn multiple_subgraph_ds_manifest() { + let yaml = " +schema: + file: + /: /ipfs/Qmschema +dataSources: + - name: SubgraphSource1 + kind: subgraph + entities: + - Gravatar + network: mainnet + source: + address: 'QmSource' + startBlock: 9562480 + mapping: + apiVersion: 0.0.6 + language: wasm/assemblyscript + entities: + - TestEntity + file: + /: /ipfs/Qmmapping + handlers: + - handler: handleEntity + entity: User + - name: SubgraphSource2 + kind: subgraph + entities: + - Profile + network: mainnet + source: + address: 'QmSource2' + startBlock: 9562500 + mapping: + apiVersion: 0.0.6 + language: wasm/assemblyscript + entities: + - TestEntity2 + file: + /: /ipfs/Qmmapping + handlers: + - handler: handleProfile + entity: Profile +specVersion: 1.3.0 +"; + + let manifest = resolve_manifest(yaml, SPEC_VERSION_1_3_0).await; + + assert_eq!("Qmmanifest", manifest.id.as_str()); + assert_eq!(manifest.data_sources.len(), 2); + + // Validate first data source + match &manifest.data_sources[0] { + DataSourceEnum::Subgraph(ds) => { + assert_eq!(ds.name, "SubgraphSource1"); + assert_eq!(ds.kind, "subgraph"); + assert_eq!(ds.source.start_block, 9562480); + } + _ => panic!("Expected a subgraph data source"), + } + + // Validate second data source + match &manifest.data_sources[1] { + DataSourceEnum::Subgraph(ds) => { + assert_eq!(ds.name, "SubgraphSource2"); + assert_eq!(ds.kind, "subgraph"); + assert_eq!(ds.source.start_block, 9562500); + } + _ => panic!("Expected a subgraph data source"), + } +} + #[tokio::test] async fn graft_manifest() { const YAML: &str = " @@ -1445,3 +1634,226 @@ dataSources: assert_eq!(4, decls.len()); }); } + +#[test] +fn parses_eth_call_decls_for_subgraph_datasource() { + const YAML: &str = " +specVersion: 1.3.0 +schema: + 
file: + /: /ipfs/Qmschema +features: + - ipfsOnEthereumContracts +dataSources: + - kind: subgraph + name: Factory + entities: + - Gravatar + network: mainnet + source: + address: 'QmSource' + startBlock: 9562480 + mapping: + apiVersion: 0.0.6 + language: wasm/assemblyscript + entities: + - TestEntity + file: + /: /ipfs/Qmmapping + abis: + - name: Factory + file: + /: /ipfs/Qmabi + handlers: + - handler: handleEntity + entity: User + calls: + fake1: Factory[entity.address].get(entity.user) + fake3: Factory[0xDeaDbeefdEAdbeefdEadbEEFdeadbeEFdEaDbeeF].get(entity.address) + fake4: Factory[0xDeaDbeefdEAdbeefdEadbEEFdeadbeEFdEaDbeeF].get(0xDeaDbeefdEAdbeefdEadbEEFdeadbeEFdEaDbeeF) +"; + + test_store::run_test_sequentially(|store| async move { + let store = store.subgraph_store(); + let unvalidated: UnvalidatedSubgraphManifest = { + let mut resolver = TextResolver::default(); + let id = DeploymentHash::new("Qmmanifest").unwrap(); + resolver.add(id.as_str(), &YAML); + resolver.add("/ipfs/Qmabi", &ABI); + resolver.add("/ipfs/Qmschema", &GQL_SCHEMA); + resolver.add("/ipfs/Qmmapping", &MAPPING_WITH_IPFS_FUNC_WASM); + resolver.add("/ipfs/QmSource", &SOURCE_SUBGRAPH_MANIFEST); + resolver.add("/ipfs/QmSourceSchema", &SOURCE_SUBGRAPH_SCHEMA); + + let resolver: Arc = Arc::new(resolver); + + let raw = serde_yaml::from_str(YAML).unwrap(); + UnvalidatedSubgraphManifest::resolve( + id, + raw, + &resolver, + &LOGGER, + SPEC_VERSION_1_3_0.clone(), + ) + .await + .expect("Parsing simple manifest works") + }; + + let manifest = unvalidated.validate(store.clone(), true).await.unwrap(); + let ds = &manifest.data_sources[0].as_subgraph().unwrap(); + // For more detailed tests of parsing CallDecls see the data_soure + // module in chain/ethereum + let decls = &ds.mapping.handlers[0].calls.decls; + assert_eq!(3, decls.len()); + }); +} + +#[tokio::test] +async fn mixed_subgraph_and_onchain_ds_manifest_should_fail() { + let yaml = " +schema: + file: + /: /ipfs/Qmschema +dataSources: + - name: 
SubgraphSource + kind: subgraph + entities: + - User + network: mainnet + source: + address: 'QmSource' + startBlock: 9562480 + mapping: + apiVersion: 0.0.6 + language: wasm/assemblyscript + entities: + - TestEntity + file: + /: /ipfs/Qmmapping + handlers: + - handler: handleEntity + entity: User + - kind: ethereum/contract + name: Gravity + network: mainnet + source: + address: '0x2E645469f354BB4F5c8a05B3b30A929361cf77eC' + abi: Gravity + startBlock: 1 + mapping: + kind: ethereum/events + apiVersion: 0.0.6 + language: wasm/assemblyscript + entities: + - Gravatar + abis: + - name: Gravity + file: + /: /ipfs/Qmabi + file: + /: /ipfs/Qmmapping + handlers: + - event: NewGravatar(uint256,address,string,string) + handler: handleNewGravatar +specVersion: 1.3.0 +"; + + let result = try_resolve_manifest(yaml, SPEC_VERSION_1_3_0).await; + assert!(result.is_err()); + let err = result.unwrap_err(); + assert!(err + .to_string() + .contains("Subgraph datasources cannot be used alongside onchain datasources")); +} + +#[test] +fn nested_subgraph_ds_manifest_should_fail() { + let yaml = r#" +schema: + file: + /: /ipfs/Qmschema +dataSources: +- name: SubgraphSource + kind: subgraph + entities: + - User + network: mainnet + source: + address: 'QmNestedSource' + startBlock: 9562480 + mapping: + apiVersion: 0.0.6 + language: wasm/assemblyscript + entities: + - TestEntity + file: + /: /ipfs/Qmmapping + handlers: + - handler: handleEntity + entity: User +specVersion: 1.3.0 +"#; + + // First modify SOURCE_SUBGRAPH_MANIFEST to include a subgraph datasource + const NESTED_SOURCE_MANIFEST: &str = r#" +schema: + file: + /: /ipfs/QmSourceSchema +dataSources: +- kind: subgraph + name: NestedSource + network: mainnet + entities: + - User + source: + address: 'QmSource' + startBlock: 1 + mapping: + apiVersion: 0.0.6 + language: wasm/assemblyscript + entities: + - User + file: + /: /ipfs/Qmmapping + handlers: + - handler: handleNested + entity: User +specVersion: 1.3.0 +"#; + + let mut resolver = 
TextResolver::default(); + let id = DeploymentHash::new("Qmmanifest").unwrap(); + + resolver.add(id.as_str(), &yaml); + resolver.add("/ipfs/Qmabi", &ABI); + resolver.add("/ipfs/Qmschema", &GQL_SCHEMA); + resolver.add("/ipfs/Qmmapping", &MAPPING_WITH_IPFS_FUNC_WASM); + resolver.add("/ipfs/QmNestedSource", &NESTED_SOURCE_MANIFEST); + resolver.add("/ipfs/QmSource", &SOURCE_SUBGRAPH_MANIFEST); + resolver.add("/ipfs/QmSourceSchema", &SOURCE_SUBGRAPH_SCHEMA); + + let resolver: Arc = Arc::new(resolver); + + let raw = serde_yaml::from_str(yaml).unwrap(); + test_store::run_test_sequentially(|_| async move { + let result: Result, _> = + UnvalidatedSubgraphManifest::resolve( + id, + raw, + &resolver, + &LOGGER, + SPEC_VERSION_1_3_0.clone(), + ) + .await; + + match result { + Ok(_) => panic!("Expected resolution to fail"), + Err(e) => { + assert!(matches!(e, SubgraphManifestResolveError::ResolveError(_))); + let error_msg = e.to_string(); + println!("{}", error_msg); + assert!(error_msg.contains("Nested subgraph data sources are not supported.")); + } + } + }) +} diff --git a/store/test-store/tests/core/interfaces.rs b/store/test-store/tests/core/interfaces.rs index 78eb2fda390..a4fc8314665 100644 --- a/store/test-store/tests/core/interfaces.rs +++ b/store/test-store/tests/core/interfaces.rs @@ -201,9 +201,9 @@ async fn reference_interface_derived() { let query = "query { events { id transaction { id } } }"; - let buy = ("BuyEvent", entity! { schema => id: "buy" }); - let sell1 = ("SellEvent", entity! { schema => id: "sell1" }); - let sell2 = ("SellEvent", entity! { schema => id: "sell2" }); + let buy = ("BuyEvent", entity! { schema => id: "buy", vid: 0i64 }); + let sell1 = ("SellEvent", entity! { schema => id: "sell1", vid: 1i64 }); + let sell2 = ("SellEvent", entity! { schema => id: "sell2", vid: 2i64 }); let gift = ( "GiftEvent", entity! 
{ schema => id: "gift", transaction: "txn" }, @@ -278,11 +278,11 @@ async fn follow_interface_reference() { let parent = ( "Animal", - entity! { schema => id: "parent", legs: 4, parent: Value::Null }, + entity! { schema => id: "parent", legs: 4, parent: Value::Null, vid: 0i64}, ); let child = ( "Animal", - entity! { schema => id: "child", legs: 3, parent: "parent" }, + entity! { schema => id: "child", legs: 3, parent: "parent" , vid: 1i64}, ); let res = insert_and_query(subgraph_id, document, vec![parent, child], query) @@ -459,16 +459,16 @@ async fn interface_inline_fragment_with_subquery() { "; let schema = InputSchema::raw(document, subgraph_id); - let mama_cow = ("Parent", entity! { schema => id: "mama_cow" }); + let mama_cow = ("Parent", entity! { schema => id: "mama_cow", vid: 0i64 }); let cow = ( "Animal", - entity! { schema => id: "1", name: "cow", legs: 4, parent: "mama_cow" }, + entity! { schema => id: "1", name: "cow", legs: 4, parent: "mama_cow", vid: 0i64 }, ); - let mama_bird = ("Parent", entity! { schema => id: "mama_bird" }); + let mama_bird = ("Parent", entity! { schema => id: "mama_bird", vid: 1i64 }); let bird = ( "Bird", - entity! { schema => id: "2", airspeed: 5, legs: 2, parent: "mama_bird" }, + entity! { schema => id: "2", airspeed: 5, legs: 2, parent: "mama_bird", vid: 1i64 }, ); let query = "query { leggeds(orderBy: legs) { legs ... on Bird { airspeed parent { id } } } }"; @@ -545,11 +545,11 @@ async fn alias() { let parent = ( "Animal", - entity! { schema => id: "parent", legs: 4, parent: Value::Null }, + entity! { schema => id: "parent", legs: 4, parent: Value::Null, vid: 0i64 }, ); let child = ( "Animal", - entity! { schema => id: "child", legs: 3, parent: "parent" }, + entity! { schema => id: "child", legs: 3, parent: "parent", vid: 1i64 }, ); let res = insert_and_query(subgraph_id, document, vec![parent, child], query) @@ -608,9 +608,15 @@ async fn fragments_dont_panic() { "; // The panic manifests if two parents exist. 
- let parent = ("Parent", entity! { schema => id: "p", child: "c" }); - let parent2 = ("Parent", entity! { schema => id: "p2", child: Value::Null }); - let child = ("Child", entity! { schema => id:"c" }); + let parent = ( + "Parent", + entity! { schema => id: "p", child: "c", vid: 0i64 }, + ); + let parent2 = ( + "Parent", + entity! { schema => id: "p2", child: Value::Null, vid: 1i64 }, + ); + let child = ("Child", entity! { schema => id:"c", vid: 2i64 }); let res = insert_and_query(subgraph_id, document, vec![parent, parent2, child], query) .await @@ -668,12 +674,15 @@ async fn fragments_dont_duplicate_data() { "; // This bug manifests if two parents exist. - let parent = ("Parent", entity! { schema => id: "p", children: vec!["c"] }); + let parent = ( + "Parent", + entity! { schema => id: "p", children: vec!["c"], vid: 0i64 }, + ); let parent2 = ( "Parent", - entity! { schema => id: "b", children: Vec::::new() }, + entity! { schema => id: "b", children: Vec::::new(), vid: 1i64 }, ); - let child = ("Child", entity! { schema => id:"c" }); + let child = ("Child", entity! { schema => id:"c", vid: 2i64 }); let res = insert_and_query(subgraph_id, document, vec![parent, parent2, child], query) .await @@ -721,11 +730,11 @@ async fn redundant_fields() { let parent = ( "Animal", - entity! { schema => id: "parent", parent: Value::Null }, + entity! { schema => id: "parent", parent: Value::Null, vid: 0i64 }, ); let child = ( "Animal", - entity! { schema => id: "child", parent: "parent" }, + entity! { schema => id: "child", parent: "parent", vid: 1i64 }, ); let res = insert_and_query(subgraph_id, document, vec![parent, child], query) @@ -783,8 +792,11 @@ async fn fragments_merge_selections() { } "; - let parent = ("Parent", entity! { schema => id: "p", children: vec!["c"] }); - let child = ("Child", entity! { schema => id: "c", foo: 1 }); + let parent = ( + "Parent", + entity! { schema => id: "p", children: vec!["c"], vid: 0i64 }, + ); + let child = ("Child", entity! 
{ schema => id: "c", foo: 1, vid: 1i64 }); let res = insert_and_query(subgraph_id, document, vec![parent, child], query) .await @@ -1081,11 +1093,11 @@ async fn enums() { let entities = vec![ ( "Trajectory", - entity! { schema => id: "1", direction: "EAST", meters: 10 }, + entity! { schema => id: "1", direction: "EAST", meters: 10, vid: 0i64 }, ), ( "Trajectory", - entity! { schema => id: "2", direction: "NORTH", meters: 15 }, + entity! { schema => id: "2", direction: "NORTH", meters: 15, vid: 1i64 }, ), ]; let query = "query { trajectories { id, direction, meters } }"; @@ -1134,15 +1146,15 @@ async fn enum_list_filters() { let entities = vec![ ( "Trajectory", - entity! { schema => id: "1", direction: "EAST", meters: 10 }, + entity! { schema => id: "1", direction: "EAST", meters: 10, vid: 0i64 }, ), ( "Trajectory", - entity! { schema => id: "2", direction: "NORTH", meters: 15 }, + entity! { schema => id: "2", direction: "NORTH", meters: 15, vid: 1i64 }, ), ( "Trajectory", - entity! { schema => id: "3", direction: "WEST", meters: 20 }, + entity! { schema => id: "3", direction: "WEST", meters: 20, vid: 2i64 }, ), ]; @@ -1327,8 +1339,8 @@ async fn derived_interface_bytes() { let entities = vec![ ("Pool", entity! { schema => id: b("0xf001") }), - ("Sell", entity! { schema => id: b("0xc0"), pool: "0xf001"}), - ("Buy", entity! { schema => id: b("0xb0"), pool: "0xf001"}), + ("Sell", entity! { schema => id: b("0xc0"), pool: "0xf001" }), + ("Buy", entity! 
{ schema => id: b("0xb0"), pool: "0xf001" }), ]; let res = insert_and_query(subgraph_id, document, entities, query) diff --git a/store/test-store/tests/graph/entity_cache.rs b/store/test-store/tests/graph/entity_cache.rs index 2f41a006172..d54a88751b8 100644 --- a/store/test-store/tests/graph/entity_cache.rs +++ b/store/test-store/tests/graph/entity_cache.rs @@ -181,7 +181,7 @@ impl WritableStore for MockStore { } } -fn make_band_key(id: &'static str) -> EntityKey { +fn make_band_key(id: &str) -> EntityKey { SCHEMA.entity_type("Band").unwrap().parse_key(id).unwrap() } @@ -207,18 +207,21 @@ fn insert_modifications() { let store = Arc::new(store); let mut cache = EntityCache::new(store); - let mogwai_data = entity! { SCHEMA => id: "mogwai", name: "Mogwai" }; + let mut mogwai_data = entity! { SCHEMA => id: "mogwai", name: "Mogwai" }; let mogwai_key = make_band_key("mogwai"); cache - .set(mogwai_key.clone(), mogwai_data.clone(), None) + .set(mogwai_key.clone(), mogwai_data.clone(), 0, None) .unwrap(); - let sigurros_data = entity! { SCHEMA => id: "sigurros", name: "Sigur Ros" }; + let mut sigurros_data = entity! { SCHEMA => id: "sigurros", name: "Sigur Ros" }; let sigurros_key = make_band_key("sigurros"); cache - .set(sigurros_key.clone(), sigurros_data.clone(), None) + .set(sigurros_key.clone(), sigurros_data.clone(), 0, None) .unwrap(); + mogwai_data.set_vid(100).unwrap(); + sigurros_data.set_vid(101).unwrap(); + let result = cache.as_modifications(0); assert_eq!( sort_by_entity_key(result.unwrap().modifications), @@ -253,18 +256,21 @@ fn overwrite_modifications() { let store = Arc::new(store); let mut cache = EntityCache::new(store); - let mogwai_data = entity! { SCHEMA => id: "mogwai", name: "Mogwai", founded: 1995 }; + let mut mogwai_data = entity! 
{ SCHEMA => id: "mogwai", name: "Mogwai", founded: 1995 }; let mogwai_key = make_band_key("mogwai"); cache - .set(mogwai_key.clone(), mogwai_data.clone(), None) + .set(mogwai_key.clone(), mogwai_data.clone(), 0, None) .unwrap(); - let sigurros_data = entity! { SCHEMA => id: "sigurros", name: "Sigur Ros", founded: 1994 }; + let mut sigurros_data = entity! { SCHEMA => id: "sigurros", name: "Sigur Ros", founded: 1994}; let sigurros_key = make_band_key("sigurros"); cache - .set(sigurros_key.clone(), sigurros_data.clone(), None) + .set(sigurros_key.clone(), sigurros_data.clone(), 0, None) .unwrap(); + mogwai_data.set_vid(100).unwrap(); + sigurros_data.set_vid(101).unwrap(); + let result = cache.as_modifications(0); assert_eq!( sort_by_entity_key(result.unwrap().modifications), @@ -293,12 +299,12 @@ fn consecutive_modifications() { let update_data = entity! { SCHEMA => id: "mogwai", founded: 1995, label: "Rock Action Records" }; let update_key = make_band_key("mogwai"); - cache.set(update_key, update_data, None).unwrap(); + cache.set(update_key, update_data, 0, None).unwrap(); // Then, just reset the "label". let update_data = entity! { SCHEMA => id: "mogwai", label: Value::Null }; let update_key = make_band_key("mogwai"); - cache.set(update_key.clone(), update_data, None).unwrap(); + cache.set(update_key.clone(), update_data, 0, None).unwrap(); // We expect a single overwrite modification for the above that leaves "id" // and "name" untouched, sets "founded" and removes the "label" field. @@ -307,12 +313,50 @@ fn consecutive_modifications() { sort_by_entity_key(result.unwrap().modifications), sort_by_entity_key(vec![EntityModification::overwrite( update_key, - entity! { SCHEMA => id: "mogwai", name: "Mogwai", founded: 1995 }, - 0 + entity! 
{ SCHEMA => id: "mogwai", name: "Mogwai", founded: 1995, vid: 101i64 }, + 0, )]) ); } +#[test] +fn check_vid_sequence() { + let store = MockStore::new(BTreeMap::new()); + let store = Arc::new(store); + let mut cache = EntityCache::new(store); + + for n in 0..10 { + let id = (10 - n).to_string(); + let name = format!("Mogwai"); + let mogwai_key = make_band_key(id.as_str()); + let mogwai_data = entity! { SCHEMA => id: id, name: name }; + cache + .set(mogwai_key.clone(), mogwai_data.clone(), 0, None) + .unwrap(); + } + + let result = cache.as_modifications(0); + let mods = result.unwrap().modifications; + for m in mods { + match m { + EntityModification::Insert { + key: _, + data, + block: _, + end: _, + } => { + let id = data.id().to_string(); + let insert_order = data.vid() - 100; + // check that the order of the insertions matches VID order by comparing + // it to the value of the ID (which is inserted in decreasing order) + let id_value = 10 - insert_order; + assert_eq!(id, format!("{}", id_value)); + } + _ => panic!("wrong entity modification type"), + } + } +} + const ACCOUNT_GQL: &str = " type Account @entity { id: ID! 
@@ -404,7 +448,7 @@ where async fn insert_test_data(store: Arc) -> DeploymentLocator { let manifest = SubgraphManifest:: { id: LOAD_RELATED_ID.clone(), - spec_version: Version::new(1, 0, 0), + spec_version: Version::new(1, 3, 0), features: Default::default(), description: None, repository: None, @@ -432,17 +476,17 @@ async fn insert_test_data(store: Arc) -> DeploymentLocator .unwrap(); // 1 account 3 wallets - let test_entity_1 = create_account_entity("1", "Johnton", "tonofjohn@email.com", 67_i32); + let test_entity_1 = create_account_entity("1", "Johnton", "tonofjohn@email.com", 67_i32, 1); let id_one = WALLET_TYPE.parse_id("1").unwrap(); - let wallet_entity_1 = create_wallet_operation("1", &id_one, 67_i32); - let wallet_entity_2 = create_wallet_operation("2", &id_one, 92_i32); - let wallet_entity_3 = create_wallet_operation("3", &id_one, 192_i32); + let wallet_entity_1 = create_wallet_operation("1", &id_one, 67_i32, 1); + let wallet_entity_2 = create_wallet_operation("2", &id_one, 92_i32, 2); + let wallet_entity_3 = create_wallet_operation("3", &id_one, 192_i32, 3); // 1 account 1 wallet - let test_entity_2 = create_account_entity("2", "Cindini", "dinici@email.com", 42_i32); + let test_entity_2 = create_account_entity("2", "Cindini", "dinici@email.com", 42_i32, 2); let id_two = WALLET_TYPE.parse_id("2").unwrap(); - let wallet_entity_4 = create_wallet_operation("4", &id_two, 32_i32); + let wallet_entity_4 = create_wallet_operation("4", &id_two, 32_i32, 4); // 1 account 0 wallets - let test_entity_3 = create_account_entity("3", "Shaqueeena", "queensha@email.com", 28_i32); + let test_entity_3 = create_account_entity("3", "Shaqueeena", "queensha@email.com", 28_i32, 3); transact_entity_operations( &store, &deployment, @@ -462,9 +506,9 @@ async fn insert_test_data(store: Arc) -> DeploymentLocator deployment } -fn create_account_entity(id: &str, name: &str, email: &str, age: i32) -> EntityOperation { +fn create_account_entity(id: &str, name: &str, email: &str, age: i32, 
vid: i64) -> EntityOperation { let test_entity = - entity! { LOAD_RELATED_SUBGRAPH => id: id, name: name, email: email, age: age }; + entity! { LOAD_RELATED_SUBGRAPH => id: id, name: name, email: email, age: age, vid: vid}; EntityOperation::Set { key: ACCOUNT_TYPE.parse_key(id).unwrap(), @@ -472,12 +516,18 @@ fn create_account_entity(id: &str, name: &str, email: &str, age: i32) -> EntityO } } -fn create_wallet_entity(id: &str, account_id: &Id, balance: i32) -> Entity { +fn create_wallet_entity(id: &str, account_id: &Id, balance: i32, vid: i64) -> Entity { + let account_id = Value::from(account_id.clone()); + entity! { LOAD_RELATED_SUBGRAPH => id: id, account: account_id, balance: balance, vid: vid} +} + +fn create_wallet_entity_no_vid(id: &str, account_id: &Id, balance: i32) -> Entity { let account_id = Value::from(account_id.clone()); - entity! { LOAD_RELATED_SUBGRAPH => id: id, account: account_id, balance: balance } + entity! { LOAD_RELATED_SUBGRAPH => id: id, account: account_id, balance: balance} } -fn create_wallet_operation(id: &str, account_id: &Id, balance: i32) -> EntityOperation { - let test_wallet = create_wallet_entity(id, account_id, balance); + +fn create_wallet_operation(id: &str, account_id: &Id, balance: i32, vid: i64) -> EntityOperation { + let test_wallet = create_wallet_entity(id, account_id, balance, vid); EntityOperation::Set { key: WALLET_TYPE.parse_key(id).unwrap(), data: test_wallet, @@ -495,9 +545,9 @@ fn check_for_account_with_multiple_wallets() { causality_region: CausalityRegion::ONCHAIN, }; let result = cache.load_related(&request).unwrap(); - let wallet_1 = create_wallet_entity("1", &account_id, 67_i32); - let wallet_2 = create_wallet_entity("2", &account_id, 92_i32); - let wallet_3 = create_wallet_entity("3", &account_id, 192_i32); + let wallet_1 = create_wallet_entity("1", &account_id, 67_i32, 1); + let wallet_2 = create_wallet_entity("2", &account_id, 92_i32, 2); + let wallet_3 = create_wallet_entity("3", &account_id, 192_i32, 3); 
let expeted_vec = vec![wallet_1, wallet_2, wallet_3]; assert_eq!(result, expeted_vec); @@ -515,7 +565,7 @@ fn check_for_account_with_single_wallet() { causality_region: CausalityRegion::ONCHAIN, }; let result = cache.load_related(&request).unwrap(); - let wallet_1 = create_wallet_entity("4", &account_id, 32_i32); + let wallet_1 = create_wallet_entity("4", &account_id, 32_i32, 4); let expeted_vec = vec![wallet_1]; assert_eq!(result, expeted_vec); @@ -581,8 +631,8 @@ fn check_for_insert_async_store() { run_store_test(|mut cache, store, deployment, _writable| async move { let account_id = ACCOUNT_TYPE.parse_id("2").unwrap(); // insert a new wallet - let wallet_entity_5 = create_wallet_operation("5", &account_id, 79_i32); - let wallet_entity_6 = create_wallet_operation("6", &account_id, 200_i32); + let wallet_entity_5 = create_wallet_operation("5", &account_id, 79_i32, 12); + let wallet_entity_6 = create_wallet_operation("6", &account_id, 200_i32, 13); transact_entity_operations( &store, @@ -599,9 +649,9 @@ fn check_for_insert_async_store() { causality_region: CausalityRegion::ONCHAIN, }; let result = cache.load_related(&request).unwrap(); - let wallet_1 = create_wallet_entity("4", &account_id, 32_i32); - let wallet_2 = create_wallet_entity("5", &account_id, 79_i32); - let wallet_3 = create_wallet_entity("6", &account_id, 200_i32); + let wallet_1 = create_wallet_entity("4", &account_id, 32_i32, 4); + let wallet_2 = create_wallet_entity("5", &account_id, 79_i32, 12); + let wallet_3 = create_wallet_entity("6", &account_id, 200_i32, 13); let expeted_vec = vec![wallet_1, wallet_2, wallet_3]; assert_eq!(result, expeted_vec); @@ -612,8 +662,8 @@ fn check_for_insert_async_not_related() { run_store_test(|mut cache, store, deployment, _writable| async move { let account_id = ACCOUNT_TYPE.parse_id("2").unwrap(); // insert a new wallet - let wallet_entity_5 = create_wallet_operation("5", &account_id, 79_i32); - let wallet_entity_6 = create_wallet_operation("6", &account_id, 
200_i32); + let wallet_entity_5 = create_wallet_operation("5", &account_id, 79_i32, 5); + let wallet_entity_6 = create_wallet_operation("6", &account_id, 200_i32, 6); transact_entity_operations( &store, @@ -631,9 +681,9 @@ fn check_for_insert_async_not_related() { causality_region: CausalityRegion::ONCHAIN, }; let result = cache.load_related(&request).unwrap(); - let wallet_1 = create_wallet_entity("1", &account_id, 67_i32); - let wallet_2 = create_wallet_entity("2", &account_id, 92_i32); - let wallet_3 = create_wallet_entity("3", &account_id, 192_i32); + let wallet_1 = create_wallet_entity("1", &account_id, 67_i32, 1); + let wallet_2 = create_wallet_entity("2", &account_id, 92_i32, 2); + let wallet_3 = create_wallet_entity("3", &account_id, 192_i32, 3); let expeted_vec = vec![wallet_1, wallet_2, wallet_3]; assert_eq!(result, expeted_vec); @@ -645,7 +695,7 @@ fn check_for_update_async_related() { run_store_test(|mut cache, store, deployment, writable| async move { let entity_key = WALLET_TYPE.parse_key("1").unwrap(); let account_id = entity_key.entity_id.clone(); - let wallet_entity_update = create_wallet_operation("1", &account_id, 79_i32); + let wallet_entity_update = create_wallet_operation("1", &account_id, 79_i32, 11); let new_data = match wallet_entity_update { EntityOperation::Set { ref data, .. 
} => data.clone(), @@ -669,8 +719,8 @@ fn check_for_update_async_related() { causality_region: CausalityRegion::ONCHAIN, }; let result = cache.load_related(&request).unwrap(); - let wallet_2 = create_wallet_entity("2", &account_id, 92_i32); - let wallet_3 = create_wallet_entity("3", &account_id, 192_i32); + let wallet_2 = create_wallet_entity("2", &account_id, 92_i32, 2); + let wallet_3 = create_wallet_entity("3", &account_id, 192_i32, 3); let expeted_vec = vec![new_data, wallet_2, wallet_3]; assert_eq!(result, expeted_vec); @@ -699,40 +749,43 @@ fn check_for_delete_async_related() { causality_region: CausalityRegion::ONCHAIN, }; let result = cache.load_related(&request).unwrap(); - let wallet_2 = create_wallet_entity("2", &account_id, 92_i32); - let wallet_3 = create_wallet_entity("3", &account_id, 192_i32); + let wallet_2 = create_wallet_entity("2", &account_id, 92_i32, 2); + let wallet_3 = create_wallet_entity("3", &account_id, 192_i32, 3); let expeted_vec = vec![wallet_2, wallet_3]; assert_eq!(result, expeted_vec); }); } - #[test] fn scoped_get() { run_store_test(|mut cache, _store, _deployment, _writable| async move { // Key for an existing entity that is in the store let account1 = ACCOUNT_TYPE.parse_id("1").unwrap(); let key1 = WALLET_TYPE.parse_key("1").unwrap(); - let wallet1 = create_wallet_entity("1", &account1, 67); + let wallet1 = create_wallet_entity_no_vid("1", &account1, 67); // Create a new entity that is not in the store let account5 = ACCOUNT_TYPE.parse_id("5").unwrap(); - let wallet5 = create_wallet_entity("5", &account5, 100); + let mut wallet5 = create_wallet_entity_no_vid("5", &account5, 100); let key5 = WALLET_TYPE.parse_key("5").unwrap(); - cache.set(key5.clone(), wallet5.clone(), None).unwrap(); + cache.set(key5.clone(), wallet5.clone(), 0, None).unwrap(); + wallet5.set_vid(100).unwrap(); // For the new entity, we can retrieve it with either scope let act5 = cache.get(&key5, GetScope::InBlock).unwrap(); assert_eq!(Some(&wallet5), 
act5.as_ref().map(|e| e.as_ref())); let act5 = cache.get(&key5, GetScope::Store).unwrap(); assert_eq!(Some(&wallet5), act5.as_ref().map(|e| e.as_ref())); + let mut wallet1a = wallet1.clone(); + wallet1a.set_vid(1).unwrap(); // For an entity in the store, we can not get it `InBlock` but with // `Store` let act1 = cache.get(&key1, GetScope::InBlock).unwrap(); assert_eq!(None, act1); let act1 = cache.get(&key1, GetScope::Store).unwrap(); - assert_eq!(Some(&wallet1), act1.as_ref().map(|e| e.as_ref())); + assert_eq!(Some(&wallet1a), act1.as_ref().map(|e| e.as_ref())); + // Even after reading from the store, the entity is not visible with // `InBlock` let act1 = cache.get(&key1, GetScope::InBlock).unwrap(); @@ -740,11 +793,13 @@ fn scoped_get() { // But if it gets updated, it becomes visible with either scope let mut wallet1 = wallet1; wallet1.set("balance", 70).unwrap(); - cache.set(key1.clone(), wallet1.clone(), None).unwrap(); + cache.set(key1.clone(), wallet1.clone(), 0, None).unwrap(); + wallet1a = wallet1; + wallet1a.set_vid(101).unwrap(); let act1 = cache.get(&key1, GetScope::InBlock).unwrap(); - assert_eq!(Some(&wallet1), act1.as_ref().map(|e| e.as_ref())); + assert_eq!(Some(&wallet1a), act1.as_ref().map(|e| e.as_ref())); let act1 = cache.get(&key1, GetScope::Store).unwrap(); - assert_eq!(Some(&wallet1), act1.as_ref().map(|e| e.as_ref())); + assert_eq!(Some(&wallet1a), act1.as_ref().map(|e| e.as_ref())); }) } @@ -787,6 +842,6 @@ fn no_interface_mods() { let entity = entity! 
{ LOAD_RELATED_SUBGRAPH => id: "1", balance: 100 }; - cache.set(key, entity, None).unwrap_err(); + cache.set(key, entity, 0, None).unwrap_err(); }) } diff --git a/store/test-store/tests/graphql/query.rs b/store/test-store/tests/graphql/query.rs index 08ad26ef9b9..d7e7dec8f55 100644 --- a/store/test-store/tests/graphql/query.rs +++ b/store/test-store/tests/graphql/query.rs @@ -424,9 +424,12 @@ async fn insert_test_entities( .into_iter() .map(|(typename, entities)| { let entity_type = schema.entity_type(typename).unwrap(); - entities.into_iter().map(move |data| EntityOperation::Set { - key: entity_type.key(data.id()), - data, + entities.into_iter().map(move |mut data| { + data.set_vid_if_empty(); + EntityOperation::Set { + key: entity_type.key(data.id()), + data, + } }) }) .flatten() @@ -468,115 +471,118 @@ async fn insert_test_entities( ( "Musician", vec![ - entity! { is => id: "m1", name: "John", mainBand: "b1", bands: vec!["b1", "b2"], favoriteCount: 10, birthDate: timestamp.clone() }, - entity! { is => id: "m2", name: "Lisa", mainBand: "b1", bands: vec!["b1"], favoriteCount: 100, birthDate: timestamp.clone() }, + entity! { is => id: "m1", name: "John", mainBand: "b1", bands: vec!["b1", "b2"], favoriteCount: 10, birthDate: timestamp.clone(), vid: 0i64 }, + entity! { is => id: "m2", name: "Lisa", mainBand: "b1", bands: vec!["b1"], favoriteCount: 100, birthDate: timestamp.clone(), vid: 1i64 }, ], ), - ("Publisher", vec![entity! { is => id: pub1 }]), + ("Publisher", vec![entity! { is => id: pub1, vid: 0i64 }]), ( "Band", vec![ - entity! { is => id: "b1", name: "The Musicians", originalSongs: vec![s[1], s[2]] }, - entity! { is => id: "b2", name: "The Amateurs", originalSongs: vec![s[1], s[3], s[4]] }, + entity! { is => id: "b1", name: "The Musicians", originalSongs: vec![s[1], s[2]], vid: 0i64 }, + entity! { is => id: "b2", name: "The Amateurs", originalSongs: vec![s[1], s[3], s[4]], vid: 1i64 }, ], ), ( "Song", vec![ - entity! 
{ is => id: s[1], sid: "s1", title: "Cheesy Tune", publisher: pub1, writtenBy: "m1", media: vec![md[1], md[2]] }, - entity! { is => id: s[2], sid: "s2", title: "Rock Tune", publisher: pub1, writtenBy: "m2", media: vec![md[3], md[4]] }, - entity! { is => id: s[3], sid: "s3", title: "Pop Tune", publisher: pub1, writtenBy: "m1", media: vec![md[5]] }, - entity! { is => id: s[4], sid: "s4", title: "Folk Tune", publisher: pub1, writtenBy: "m3", media: vec![md[6]] }, + entity! { is => id: s[1], sid: "s1", title: "Cheesy Tune", publisher: pub1, writtenBy: "m1", media: vec![md[1], md[2]], vid: 0i64 }, + entity! { is => id: s[2], sid: "s2", title: "Rock Tune", publisher: pub1, writtenBy: "m2", media: vec![md[3], md[4]], vid: 1i64 }, + entity! { is => id: s[3], sid: "s3", title: "Pop Tune", publisher: pub1, writtenBy: "m1", media: vec![md[5]], vid: 2i64 }, + entity! { is => id: s[4], sid: "s4", title: "Folk Tune", publisher: pub1, writtenBy: "m3", media: vec![md[6]], vid: 3i64 }, ], ), ( "User", vec![ - entity! { is => id: "u1", name: "User 1", latestSongReview: "r3", latestBandReview: "r1", latestReview: "r3" }, + entity! { is => id: "u1", name: "User 1", latestSongReview: "r3", latestBandReview: "r1", latestReview: "r3", vid: 0i64 }, ], ), ( "SongStat", vec![ - entity! { is => id: s[1], played: 10 }, - entity! { is => id: s[2], played: 15 }, + entity! { is => id: s[1], played: 10, vid: 0i64 }, + entity! { is => id: s[2], played: 15, vid: 1i64 }, ], ), ( "BandReview", vec![ - entity! { is => id: "r1", body: "Bad musicians", band: "b1", author: "u1" }, - entity! { is => id: "r2", body: "Good amateurs", band: "b2", author: "u2" }, - entity! { is => id: "r5", body: "Very Bad musicians", band: "b1", author: "u3" }, + entity! { is => id: "r1", body: "Bad musicians", band: "b1", author: "u1", vid: 0i64 }, + entity! { is => id: "r2", body: "Good amateurs", band: "b2", author: "u2", vid: 1i64 }, + entity! 
{ is => id: "r5", body: "Very Bad musicians", band: "b1", author: "u3", vid: 2i64 }, ], ), ( "SongReview", vec![ - entity! { is => id: "r3", body: "Bad", song: s[2], author: "u1" }, - entity! { is => id: "r4", body: "Good", song: s[3], author: "u2" }, - entity! { is => id: "r6", body: "Very Bad", song: s[2], author: "u3" }, + entity! { is => id: "r3", body: "Bad", song: s[2], author: "u1", vid: 0i64 }, + entity! { is => id: "r4", body: "Good", song: s[3], author: "u2", vid: 1i64 }, + entity! { is => id: "r6", body: "Very Bad", song: s[2], author: "u3", vid: 2i64 }, ], ), ( "User", vec![ - entity! { is => id: "u1", name: "Baden", latestSongReview: "r3", latestBandReview: "r1", latestReview: "r1" }, - entity! { is => id: "u2", name: "Goodwill", latestSongReview: "r4", latestBandReview: "r2", latestReview: "r2" }, + entity! { is => id: "u1", name: "Baden", latestSongReview: "r3", latestBandReview: "r1", latestReview: "r1", vid: 0i64 }, + entity! { is => id: "u2", name: "Goodwill", latestSongReview: "r4", latestBandReview: "r2", latestReview: "r2", vid: 1i64 }, ], ), ( "AnonymousUser", vec![ - entity! { is => id: "u3", name: "Anonymous 3", latestSongReview: "r6", latestBandReview: "r5", latestReview: "r5" }, + entity! { is => id: "u3", name: "Anonymous 3", latestSongReview: "r6", latestBandReview: "r5", latestReview: "r5", vid: 0i64 }, ], ), ( "Photo", vec![ - entity! { is => id: md[1], title: "Cheesy Tune Single Cover", author: "u1" }, - entity! { is => id: md[3], title: "Rock Tune Single Cover", author: "u1" }, - entity! { is => id: md[5], title: "Pop Tune Single Cover", author: "u1" }, + entity! { is => id: md[1], title: "Cheesy Tune Single Cover", author: "u1", vid: 0i64 }, + entity! { is => id: md[3], title: "Rock Tune Single Cover", author: "u1", vid: 1i64 }, + entity! { is => id: md[5], title: "Pop Tune Single Cover", author: "u1", vid: 2i64 }, ], ), ( "Video", vec![ - entity! { is => id: md[2], title: "Cheesy Tune Music Video", author: "u2" }, - entity! 
{ is => id: md[4], title: "Rock Tune Music Video", author: "u2" }, - entity! { is => id: md[6], title: "Folk Tune Music Video", author: "u2" }, + entity! { is => id: md[2], title: "Cheesy Tune Music Video", author: "u2", vid: 0i64 }, + entity! { is => id: md[4], title: "Rock Tune Music Video", author: "u2", vid: 1i64 }, + entity! { is => id: md[6], title: "Folk Tune Music Video", author: "u2", vid: 2i64 }, ], ), ( "Album", - vec![entity! { is => id: "rl1", title: "Pop and Folk", songs: vec![s[3], s[4]] }], + vec![ + entity! { is => id: "rl1", title: "Pop and Folk", songs: vec![s[3], s[4]], vid: 0i64 }, + ], ), ( "Single", vec![ - entity! { is => id: "rl2", title: "Rock", songs: vec![s[2]] }, - entity! { is => id: "rl3", title: "Cheesy", songs: vec![s[1]] }, - entity! { is => id: "rl4", title: "Silence", songs: Vec::::new() }, + entity! { is => id: "rl2", title: "Rock", songs: vec![s[2]], vid: 0i64 }, + entity! { is => id: "rl3", title: "Cheesy", songs: vec![s[1]], vid: 1i64 }, + entity! { is => id: "rl4", title: "Silence", songs: Vec::::new(), vid: 2i64 }, ], ), ( "Plays", vec![ - entity! { is => id: 1i64, timestamp: ts0, song: s[1], user: "u1"}, - entity! { is => id: 2i64, timestamp: ts0, song: s[1], user: "u2"}, - entity! { is => id: 3i64, timestamp: ts0, song: s[2], user: "u1"}, - entity! { is => id: 4i64, timestamp: ts0, song: s[1], user: "u1"}, - entity! { is => id: 5i64, timestamp: ts0, song: s[1], user: "u1"}, + entity! { is => id: 1i64, timestamp: ts0, song: s[1], user: "u1", vid: 0i64 }, + entity! { is => id: 2i64, timestamp: ts0, song: s[1], user: "u2", vid: 1i64 }, + entity! { is => id: 3i64, timestamp: ts0, song: s[2], user: "u1", vid: 2i64 }, + entity! { is => id: 4i64, timestamp: ts0, song: s[1], user: "u1", vid: 3i64 }, + entity! { is => id: 5i64, timestamp: ts0, song: s[1], user: "u1", vid: 4i64 }, ], ), ]; + let entities0 = insert_ops(&manifest.schema, entities0); let entities1 = vec![( "Musician", vec![ - entity! 
{ is => id: "m3", name: "Tom", mainBand: "b2", bands: vec!["b1", "b2"], favoriteCount: 5, birthDate: timestamp.clone() }, - entity! { is => id: "m4", name: "Valerie", bands: Vec::::new(), favoriteCount: 20, birthDate: timestamp.clone() }, + entity! { is => id: "m3", name: "Tom", mainBand: "b2", bands: vec!["b1", "b2"], favoriteCount: 5, birthDate: timestamp.clone(), vid: 2i64 }, + entity! { is => id: "m4", name: "Valerie", bands: Vec::::new(), favoriteCount: 20, birthDate: timestamp.clone(), vid: 3i64 }, ], )]; let entities1 = insert_ops(&manifest.schema, entities1); diff --git a/store/test-store/tests/postgres/aggregation.rs b/store/test-store/tests/postgres/aggregation.rs index 432bc685a62..b131cb4a323 100644 --- a/store/test-store/tests/postgres/aggregation.rs +++ b/store/test-store/tests/postgres/aggregation.rs @@ -79,9 +79,10 @@ pub async fn insert( let schema = ReadStore::input_schema(store); let ops = entities .into_iter() - .map(|data| { + .map(|mut data| { let data_type = schema.entity_type("Data").unwrap(); let key = data_type.key(data.id()); + data.set_vid_if_empty(); EntityOperation::Set { data, key } }) .collect(); @@ -125,8 +126,8 @@ async fn insert_test_data(store: Arc, deployment: DeploymentL let ts64 = TIMES[0]; let entities = vec![ - entity! { schema => id: 1i64, timestamp: ts64, token: TOKEN1.clone(), price: bd(1), amount: bd(10) }, - entity! { schema => id: 2i64, timestamp: ts64, token: TOKEN2.clone(), price: bd(1), amount: bd(1) }, + entity! { schema => id: 1i64, timestamp: ts64, token: TOKEN1.clone(), price: bd(1), amount: bd(10), vid: 11i64 }, + entity! { schema => id: 2i64, timestamp: ts64, token: TOKEN2.clone(), price: bd(1), amount: bd(1), vid: 12i64 }, ]; insert(&store, &deployment, BLOCKS[0].clone(), TIMES[0], entities) @@ -135,8 +136,8 @@ async fn insert_test_data(store: Arc, deployment: DeploymentL let ts64 = TIMES[1]; let entities = vec![ - entity! 
{ schema => id: 11i64, timestamp: ts64, token: TOKEN1.clone(), price: bd(2), amount: bd(2) }, - entity! { schema => id: 12i64, timestamp: ts64, token: TOKEN2.clone(), price: bd(2), amount: bd(20) }, + entity! { schema => id: 11i64, timestamp: ts64, token: TOKEN1.clone(), price: bd(2), amount: bd(2), vid: 21i64 }, + entity! { schema => id: 12i64, timestamp: ts64, token: TOKEN2.clone(), price: bd(2), amount: bd(20), vid: 22i64 }, ]; insert(&store, &deployment, BLOCKS[1].clone(), TIMES[1], entities) .await @@ -144,8 +145,8 @@ async fn insert_test_data(store: Arc, deployment: DeploymentL let ts64 = TIMES[2]; let entities = vec![ - entity! { schema => id: 21i64, timestamp: ts64, token: TOKEN1.clone(), price: bd(3), amount: bd(30) }, - entity! { schema => id: 22i64, timestamp: ts64, token: TOKEN2.clone(), price: bd(3), amount: bd(3) }, + entity! { schema => id: 21i64, timestamp: ts64, token: TOKEN1.clone(), price: bd(3), amount: bd(30), vid: 31i64 }, + entity! { schema => id: 22i64, timestamp: ts64, token: TOKEN2.clone(), price: bd(3), amount: bd(3), vid: 32i64 }, ]; insert(&store, &deployment, BLOCKS[2].clone(), TIMES[2], entities) .await @@ -153,8 +154,8 @@ async fn insert_test_data(store: Arc, deployment: DeploymentL let ts64 = TIMES[3]; let entities = vec![ - entity! { schema => id: 31i64, timestamp: ts64, token: TOKEN1.clone(), price: bd(4), amount: bd(4) }, - entity! { schema => id: 32i64, timestamp: ts64, token: TOKEN2.clone(), price: bd(4), amount: bd(40) }, + entity! { schema => id: 31i64, timestamp: ts64, token: TOKEN1.clone(), price: bd(4), amount: bd(4), vid: 41i64 }, + entity! { schema => id: 32i64, timestamp: ts64, token: TOKEN2.clone(), price: bd(4), amount: bd(40), vid: 42i64 }, ]; insert(&store, &deployment, BLOCKS[3].clone(), TIMES[3], entities) .await @@ -173,10 +174,10 @@ fn stats_hour(schema: &InputSchema) -> Vec> { let block2 = vec![ entity! 
{ schema => id: 11i64, timestamp: ts2, token: TOKEN1.clone(), sum: bd(3), sum_sq: bd(5), max: bd(10), first: bd(10), last: bd(2), - value: bd(14), totalValue: bd(14) }, + value: bd(14), totalValue: bd(14), vid: 1i64 }, entity! { schema => id: 12i64, timestamp: ts2, token: TOKEN2.clone(), sum: bd(3), sum_sq: bd(5), max: bd(20), first: bd(1), last: bd(20), - value: bd(41), totalValue: bd(41) }, + value: bd(41), totalValue: bd(41), vid: 2i64 }, ]; let ts3 = BlockTime::since_epoch(3600, 0); @@ -186,10 +187,10 @@ fn stats_hour(schema: &InputSchema) -> Vec> { let mut v2 = vec![ entity! { schema => id: 21i64, timestamp: ts3, token: TOKEN1.clone(), sum: bd(3), sum_sq: bd(9), max: bd(30), first: bd(30), last: bd(30), - value: bd(90), totalValue: bd(104) }, + value: bd(90), totalValue: bd(104), vid: 3i64 }, entity! { schema => id: 22i64, timestamp: ts3, token: TOKEN2.clone(), sum: bd(3), sum_sq: bd(9), max: bd(3), first: bd(3), last: bd(3), - value: bd(9), totalValue: bd(50)}, + value: bd(9), totalValue: bd(50), vid: 4i64 }, ]; v1.append(&mut v2); v1 diff --git a/store/test-store/tests/postgres/graft.rs b/store/test-store/tests/postgres/graft.rs index 1580a62b1aa..d9da064ff66 100644 --- a/store/test-store/tests/postgres/graft.rs +++ b/store/test-store/tests/postgres/graft.rs @@ -136,7 +136,7 @@ where async fn insert_test_data(store: Arc) -> DeploymentLocator { let manifest = SubgraphManifest:: { id: TEST_SUBGRAPH_ID.clone(), - spec_version: Version::new(1, 0, 0), + spec_version: Version::new(1, 3, 0), features: Default::default(), description: None, repository: None, @@ -175,6 +175,7 @@ async fn insert_test_data(store: Arc) -> DeploymentLocator 184.4, false, None, + 0, ); transact_entity_operations(&store, &deployment, BLOCKS[0].clone(), vec![test_entity_1]) .await @@ -189,6 +190,7 @@ async fn insert_test_data(store: Arc) -> DeploymentLocator 159.1, true, Some("red"), + 1, ); let test_entity_3_1 = create_test_entity( "3", @@ -199,6 +201,7 @@ async fn insert_test_data(store: 
Arc) -> DeploymentLocator 111.7, false, Some("blue"), + 2, ); transact_entity_operations( &store, @@ -218,6 +221,7 @@ async fn insert_test_data(store: Arc) -> DeploymentLocator 111.7, false, None, + 3, ); transact_entity_operations( &store, @@ -241,6 +245,7 @@ fn create_test_entity( weight: f64, coffee: bool, favorite_color: Option<&str>, + vid: i64, ) -> EntityOperation { let bin_name = scalar::Bytes::from_str(&hex::encode(name)).unwrap(); let test_entity = entity! { TEST_SUBGRAPH_SCHEMA => @@ -252,7 +257,8 @@ fn create_test_entity( seconds_age: age * 31557600, weight: Value::BigDecimal(weight.into()), coffee: coffee, - favorite_color: favorite_color + favorite_color: favorite_color, + vid: vid, }; let entity_type = TEST_SUBGRAPH_SCHEMA.entity_type(entity_type).unwrap(); @@ -324,6 +330,7 @@ async fn check_graft( // Make our own entries for block 2 shaq.set("email", "shaq@gmail.com").unwrap(); + let _ = shaq.set_vid(3); let op = EntityOperation::Set { key: user_type.parse_key("3").unwrap(), data: shaq, @@ -601,6 +608,7 @@ fn prune() { 157.1, true, Some("red"), + 4, ); transact_and_wait(&store, &src, BLOCKS[5].clone(), vec![user2]) .await diff --git a/store/test-store/tests/postgres/relational.rs b/store/test-store/tests/postgres/relational.rs index f1be71a6ed8..5d01bd3c510 100644 --- a/store/test-store/tests/postgres/relational.rs +++ b/store/test-store/tests/postgres/relational.rs @@ -212,11 +212,13 @@ lazy_static! { bigInt: big_int.clone(), bigIntArray: vec![big_int.clone(), (big_int + 1.into())], color: "yellow", + vid: 0i64, } }; static ref EMPTY_NULLABLESTRINGS_ENTITY: Entity = { entity! 
{ THINGS_SCHEMA => id: "one", + vid: 0i64, } }; static ref SCALAR_TYPE: EntityType = THINGS_SCHEMA.entity_type("Scalar").unwrap(); @@ -325,6 +327,7 @@ fn insert_user_entity( drinks: Option>, visits: i64, block: BlockNumber, + vid: i64, ) { let user = make_user( &layout.input_schema, @@ -337,6 +340,7 @@ fn insert_user_entity( favorite_color, drinks, visits, + vid, ); insert_entity_at(conn, layout, entity_type, vec![user], block); @@ -353,6 +357,7 @@ fn make_user( favorite_color: Option<&str>, drinks: Option>, visits: i64, + vid: i64, ) -> Entity { let favorite_color = favorite_color .map(|s| Value::String(s.to_owned())) @@ -368,7 +373,8 @@ fn make_user( weight: BigDecimal::from(weight), coffee: coffee, favorite_color: favorite_color, - visits: visits + visits: visits, + vid: vid, }; if let Some(drinks) = drinks { user.insert("drinks", drinks.into()).unwrap(); @@ -391,6 +397,7 @@ fn insert_users(conn: &mut PgConnection, layout: &Layout) { None, 60, 0, + 0, ); insert_user_entity( conn, @@ -406,6 +413,7 @@ fn insert_users(conn: &mut PgConnection, layout: &Layout) { Some(vec!["beer", "wine"]), 50, 0, + 1, ); insert_user_entity( conn, @@ -421,6 +429,7 @@ fn insert_users(conn: &mut PgConnection, layout: &Layout) { Some(vec!["coffee", "tea"]), 22, 0, + 2, ); } @@ -438,6 +447,7 @@ fn update_user_entity( drinks: Option>, visits: i64, block: BlockNumber, + vid: i64, ) { let user = make_user( &layout.input_schema, @@ -450,6 +460,7 @@ fn update_user_entity( favorite_color, drinks, visits, + vid, ); update_entity_at(conn, layout, entity_type, vec![user], block); } @@ -461,17 +472,19 @@ fn insert_pet( id: &str, name: &str, block: BlockNumber, + vid: i64, ) { let pet = entity! 
{ layout.input_schema => id: id, - name: name + name: name, + vid: vid, }; insert_entity_at(conn, layout, entity_type, vec![pet], block); } fn insert_pets(conn: &mut PgConnection, layout: &Layout) { - insert_pet(conn, layout, &*DOG_TYPE, "pluto", "Pluto", 0); - insert_pet(conn, layout, &*CAT_TYPE, "garfield", "Garfield", 0); + insert_pet(conn, layout, &*DOG_TYPE, "pluto", "Pluto", 0, 0); + insert_pet(conn, layout, &*CAT_TYPE, "garfield", "Garfield", 0, 1); } fn create_schema(conn: &mut PgConnection) -> Layout { @@ -604,6 +617,7 @@ fn update() { entity.set("string", "updated").unwrap(); entity.remove("strings"); entity.set("bool", Value::Null).unwrap(); + entity.set("vid", 1i64).unwrap(); let key = SCALAR_TYPE.key(entity.id()); let entity_type = layout.input_schema.entity_type("Scalar").unwrap(); @@ -631,8 +645,10 @@ fn update_many() { let mut one = SCALAR_ENTITY.clone(); let mut two = SCALAR_ENTITY.clone(); two.set("id", "two").unwrap(); + two.set("vid", 1i64).unwrap(); let mut three = SCALAR_ENTITY.clone(); three.set("id", "three").unwrap(); + three.set("vid", 2i64).unwrap(); insert_entity( conn, layout, @@ -654,6 +670,10 @@ fn update_many() { three.remove("strings"); three.set("color", "red").unwrap(); + one.set("vid", 3i64).unwrap(); + two.set("vid", 4i64).unwrap(); + three.set("vid", 5i64).unwrap(); + // generate keys let entity_type = layout.input_schema.entity_type("Scalar").unwrap(); let keys: Vec = ["one", "two", "three"] @@ -720,10 +740,13 @@ fn serialize_bigdecimal() { // Update with overwrite let mut entity = SCALAR_ENTITY.clone(); + let mut vid = 1i64; for d in &["50", "50.00", "5000", "0.5000", "0.050", "0.5", "0.05"] { let d = BigDecimal::from_str(d).unwrap(); entity.set("bigDecimal", d).unwrap(); + entity.set("vid", vid).unwrap(); + vid += 1; let key = SCALAR_TYPE.key(entity.id()); let entity_type = layout.input_schema.entity_type("Scalar").unwrap(); @@ -755,7 +778,8 @@ fn enum_arrays() { let spectrum = entity! 
{ THINGS_SCHEMA => id: "rainbow", main: "yellow", - all: vec!["yellow", "red", "BLUE"] + all: vec!["yellow", "red", "BLUE"], + vid: 0i64 }; insert_entity( @@ -803,6 +827,7 @@ fn delete() { insert_entity(conn, layout, &*SCALAR_TYPE, vec![SCALAR_ENTITY.clone()]); let mut two = SCALAR_ENTITY.clone(); two.set("id", "two").unwrap(); + two.set("vid", 1i64).unwrap(); insert_entity(conn, layout, &*SCALAR_TYPE, vec![two]); // Delete where nothing is getting deleted @@ -837,8 +862,10 @@ fn insert_many_and_delete_many() { let one = SCALAR_ENTITY.clone(); let mut two = SCALAR_ENTITY.clone(); two.set("id", "two").unwrap(); + two.set("vid", 1i64).unwrap(); let mut three = SCALAR_ENTITY.clone(); three.set("id", "three").unwrap(); + three.set("vid", 2i64).unwrap(); insert_entity(conn, layout, &*SCALAR_TYPE, vec![one, two, three]); // confidence test: there should be 3 scalar entities in store right now @@ -919,6 +946,7 @@ fn conflicting_entity() { cat: &str, dog: &str, ferret: &str, + vid: i64, ) { let conflicting = |conn: &mut PgConnection, entity_type: &EntityType, types: Vec<&EntityType>| { @@ -944,7 +972,7 @@ fn conflicting_entity() { let dog_type = layout.input_schema.entity_type(dog).unwrap(); let ferret_type = layout.input_schema.entity_type(ferret).unwrap(); - let fred = entity! { layout.input_schema => id: id.clone(), name: id.clone() }; + let fred = entity! 
{ layout.input_schema => id: id.clone(), name: id.clone(), vid: vid }; insert_entity(conn, layout, &cat_type, vec![fred]); // If we wanted to create Fred the dog, which is forbidden, we'd run this: @@ -958,10 +986,10 @@ fn conflicting_entity() { run_test(|mut conn, layout| { let id = Value::String("fred".to_string()); - check(&mut conn, layout, id, "Cat", "Dog", "Ferret"); + check(&mut conn, layout, id, "Cat", "Dog", "Ferret", 0); let id = Value::Bytes(scalar::Bytes::from_str("0xf1ed").unwrap()); - check(&mut conn, layout, id, "ByteCat", "ByteDog", "ByteFerret"); + check(&mut conn, layout, id, "ByteCat", "ByteDog", "ByteFerret", 1); }) } @@ -973,7 +1001,8 @@ fn revert_block() { let set_fred = |conn: &mut PgConnection, name, block| { let fred = entity! { layout.input_schema => id: id, - name: name + name: name, + vid: block as i64, }; if block == 0 { insert_entity_at(conn, layout, &*CAT_TYPE, vec![fred], block); @@ -1013,6 +1042,7 @@ fn revert_block() { let marty = entity! { layout.input_schema => id: id, order: block, + vid: (block + 10) as i64 }; insert_entity_at(conn, layout, &*MINK_TYPE, vec![marty], block); } @@ -1091,6 +1121,7 @@ impl<'a> QueryChecker<'a> { None, 23, 0, + 3, ); insert_pets(conn, layout); @@ -1203,6 +1234,7 @@ fn check_block_finds() { None, 55, 1, + 4, ); checker @@ -1745,10 +1777,10 @@ struct FilterChecker<'a> { impl<'a> FilterChecker<'a> { fn new(conn: &'a mut PgConnection, layout: &'a Layout) -> Self { let (a1, a2, a2b, a3) = ferrets(); - insert_pet(conn, layout, &*FERRET_TYPE, "a1", &a1, 0); - insert_pet(conn, layout, &*FERRET_TYPE, "a2", &a2, 0); - insert_pet(conn, layout, &*FERRET_TYPE, "a2b", &a2b, 0); - insert_pet(conn, layout, &*FERRET_TYPE, "a3", &a3, 0); + insert_pet(conn, layout, &*FERRET_TYPE, "a1", &a1, 0, 0); + insert_pet(conn, layout, &*FERRET_TYPE, "a2", &a2, 0, 1); + insert_pet(conn, layout, &*FERRET_TYPE, "a2b", &a2b, 0, 2); + insert_pet(conn, layout, &*FERRET_TYPE, "a3", &a3, 0, 3); Self { conn, layout } } @@ -1892,7 +1924,8 
@@ fn check_filters() { &*FERRET_TYPE, vec![entity! { layout.input_schema => id: "a1", - name: "Test" + name: "Test", + vid: 5i64 }], 1, ); diff --git a/store/test-store/tests/postgres/relational_bytes.rs b/store/test-store/tests/postgres/relational_bytes.rs index b7b8f36b7d7..3f4bd88c8d8 100644 --- a/store/test-store/tests/postgres/relational_bytes.rs +++ b/store/test-store/tests/postgres/relational_bytes.rs @@ -57,6 +57,7 @@ lazy_static! { static ref BEEF_ENTITY: Entity = entity! { THINGS_SCHEMA => id: scalar::Bytes::from_str("deadbeef").unwrap(), name: "Beef", + vid: 0i64 }; static ref NAMESPACE: Namespace = Namespace::new("sgd0815".to_string()).unwrap(); static ref THING_TYPE: EntityType = THINGS_SCHEMA.entity_type("Thing").unwrap(); @@ -128,14 +129,15 @@ fn insert_entity(conn: &mut PgConnection, layout: &Layout, entity_type: &str, en layout.insert(conn, &group, &MOCK_STOPWATCH).expect(&errmsg); } -fn insert_thing(conn: &mut PgConnection, layout: &Layout, id: &str, name: &str) { +fn insert_thing(conn: &mut PgConnection, layout: &Layout, id: &str, name: &str, vid: i64) { insert_entity( conn, layout, "Thing", entity! { layout.input_schema => id: id, - name: name + name: name, + vid: vid, }, ); } @@ -155,12 +157,6 @@ fn create_schema(conn: &mut PgConnection) -> Layout { .expect("Failed to create relational schema") } -fn scrub(entity: &Entity) -> Entity { - let mut scrubbed = entity.clone(); - scrubbed.remove_null_fields(); - scrubbed -} - macro_rules! 
assert_entity_eq { ($left:expr, $right:expr) => {{ let (left, right) = (&($left), &($right)); @@ -265,11 +261,11 @@ fn find() { const ID: &str = "deadbeef"; const NAME: &str = "Beef"; - insert_thing(&mut conn, layout, ID, NAME); + insert_thing(&mut conn, layout, ID, NAME, 0); // Happy path: find existing entity let entity = find_entity(conn, layout, ID).unwrap(); - assert_entity_eq!(scrub(&BEEF_ENTITY), entity); + assert_entity_eq!(BEEF_ENTITY.clone(), entity); assert!(CausalityRegion::from_entity(&entity) == CausalityRegion::ONCHAIN); // Find non-existing entity @@ -285,8 +281,8 @@ fn find_many() { const NAME: &str = "Beef"; const ID2: &str = "0xdeadbeef02"; const NAME2: &str = "Moo"; - insert_thing(&mut conn, layout, ID, NAME); - insert_thing(&mut conn, layout, ID2, NAME2); + insert_thing(&mut conn, layout, ID, NAME, 0); + insert_thing(&mut conn, layout, ID2, NAME2, 1); let mut id_map = BTreeMap::default(); let ids = IdList::try_from_iter( @@ -318,6 +314,7 @@ fn update() { // Update the entity let mut entity = BEEF_ENTITY.clone(); entity.set("name", "Moo").unwrap(); + entity.set("vid", 1i64).unwrap(); let key = THING_TYPE.key(entity.id()); let entity_id = entity.id(); @@ -345,6 +342,7 @@ fn delete() { insert_entity(&mut conn, layout, "Thing", BEEF_ENTITY.clone()); let mut two = BEEF_ENTITY.clone(); two.set("id", TWO_ID).unwrap(); + two.set("vid", 1i64).unwrap(); insert_entity(&mut conn, layout, "Thing", two); // Delete where nothing is getting deleted @@ -392,29 +390,34 @@ fn make_thing_tree(conn: &mut PgConnection, layout: &Layout) -> (Entity, Entity, let root = entity! { layout.input_schema => id: ROOT, name: "root", - children: vec!["babe01", "babe02"] + children: vec!["babe01", "babe02"], + vid: 0i64, }; let child1 = entity! { layout.input_schema => id: CHILD1, name: "child1", parent: "dead00", - children: vec![GRANDCHILD1] + children: vec![GRANDCHILD1], + vid: 1i64, }; let child2 = entity! 
{ layout.input_schema => id: CHILD2, name: "child2", parent: "dead00", - children: vec![GRANDCHILD1] + children: vec![GRANDCHILD1], + vid: 2i64, }; let grand_child1 = entity! { layout.input_schema => id: GRANDCHILD1, name: "grandchild1", - parent: CHILD1 + parent: CHILD1, + vid: 3i64, }; let grand_child2 = entity! { layout.input_schema => id: GRANDCHILD2, name: "grandchild2", - parent: CHILD2 + parent: CHILD2, + vid: 4i64, }; insert_entity(conn, layout, "Thing", root.clone()); diff --git a/store/test-store/tests/postgres/store.rs b/store/test-store/tests/postgres/store.rs index aba953975a3..5f2f1e80e6c 100644 --- a/store/test-store/tests/postgres/store.rs +++ b/store/test-store/tests/postgres/store.rs @@ -3,10 +3,11 @@ use graph::blockchain::BlockTime; use graph::data::graphql::ext::TypeDefinitionExt; use graph::data::query::QueryTarget; use graph::data::subgraph::schema::DeploymentCreate; +use graph::data_source::common::MappingABI; use graph::futures01::{future, Stream}; use graph::futures03::compat::Future01CompatExt; use graph::schema::{EntityType, InputSchema}; -use graph_chain_ethereum::{Mapping, MappingABI}; +use graph_chain_ethereum::Mapping; use hex_literal::hex; use lazy_static::lazy_static; use std::time::Duration; @@ -164,7 +165,7 @@ where async fn insert_test_data(store: Arc) -> DeploymentLocator { let manifest = SubgraphManifest:: { id: TEST_SUBGRAPH_ID.clone(), - spec_version: Version::new(1, 0, 0), + spec_version: Version::new(1, 3, 0), features: Default::default(), description: None, repository: None, @@ -200,6 +201,7 @@ async fn insert_test_data(store: Arc) -> DeploymentLocator 184.4, false, None, + 0, ); transact_entity_operations( &store, @@ -219,6 +221,7 @@ async fn insert_test_data(store: Arc) -> DeploymentLocator 159.1, true, Some("red"), + 1, ); let test_entity_3_1 = create_test_entity( "3", @@ -229,6 +232,7 @@ async fn insert_test_data(store: Arc) -> DeploymentLocator 111.7, false, Some("blue"), + 2, ); transact_entity_operations( &store, 
@@ -248,6 +252,7 @@ async fn insert_test_data(store: Arc) -> DeploymentLocator 111.7, false, None, + 3, ); transact_and_wait( &store, @@ -271,6 +276,7 @@ fn create_test_entity( weight: f64, coffee: bool, favorite_color: Option<&str>, + vid: i64, ) -> EntityOperation { let bin_name = scalar::Bytes::from_str(&hex::encode(name)).unwrap(); let test_entity = entity! { TEST_SUBGRAPH_SCHEMA => @@ -283,6 +289,7 @@ fn create_test_entity( weight: Value::BigDecimal(weight.into()), coffee: coffee, favorite_color: favorite_color, + vid: vid, }; EntityOperation::Set { @@ -351,6 +358,7 @@ fn get_entity_1() { seconds_age: Value::BigInt(BigInt::from(2114359200)), weight: Value::BigDecimal(184.4.into()), coffee: false, + vid: 0i64 }; // "favorite_color" was set to `Null` earlier and should be absent @@ -376,6 +384,7 @@ fn get_entity_3() { seconds_age: Value::BigInt(BigInt::from(883612800)), weight: Value::BigDecimal(111.7.into()), coffee: false, + vid: 3_i64, }; // "favorite_color" was set to `Null` earlier and should be absent @@ -397,6 +406,7 @@ fn insert_entity() { 111.7, true, Some("green"), + 5, ); let count = get_entity_count(store.clone(), &deployment.hash); transact_and_wait( @@ -428,6 +438,7 @@ fn update_existing() { 111.7, true, Some("green"), + 6, ); let mut new_data = match op { EntityOperation::Set { ref data, .. } => data.clone(), @@ -466,7 +477,8 @@ fn partially_update_existing() { let entity_key = USER_TYPE.parse_key("1").unwrap(); let schema = writable.input_schema(); - let partial_entity = entity! { schema => id: "1", name: "Johnny Boy", email: Value::Null }; + let partial_entity = + entity! { schema => id: "1", name: "Johnny Boy", email: Value::Null, vid: 11i64 }; let original_entity = writable .get(&entity_key) @@ -1076,7 +1088,8 @@ fn revert_block_with_partial_update() { let entity_key = USER_TYPE.parse_key("1").unwrap(); let schema = writable.input_schema(); - let partial_entity = entity! 
{ schema => id: "1", name: "Johnny Boy", email: Value::Null }; + let partial_entity = + entity! { schema => id: "1", name: "Johnny Boy", email: Value::Null, vid: 5i64 }; let original_entity = writable.get(&entity_key).unwrap().expect("missing entity"); @@ -1087,7 +1100,7 @@ fn revert_block_with_partial_update() { TEST_BLOCK_3_PTR.clone(), vec![EntityOperation::Set { key: entity_key.clone(), - data: partial_entity.clone(), + data: partial_entity, }], ) .await @@ -1171,7 +1184,8 @@ fn revert_block_with_dynamic_data_source_operations() { // Create operations to add a user let user_key = USER_TYPE.parse_key("1").unwrap(); - let partial_entity = entity! { schema => id: "1", name: "Johnny Boy", email: Value::Null }; + let partial_entity = + entity! { schema => id: "1", name: "Johnny Boy", email: Value::Null, vid: 5i64 }; // Get the original user for comparisons let original_user = writable.get(&user_key).unwrap().expect("missing entity"); @@ -1256,7 +1270,7 @@ fn entity_changes_are_fired_and_forwarded_to_subscriptions() { .expect("Failed to parse user schema"); let manifest = SubgraphManifest:: { id: subgraph_id.clone(), - spec_version: Version::new(1, 0, 0), + spec_version: Version::new(1, 3, 0), features: Default::default(), description: None, repository: None, @@ -1290,9 +1304,12 @@ fn entity_changes_are_fired_and_forwarded_to_subscriptions() { let added_entities = vec![ ( "1".to_owned(), - entity! { schema => id: "1", name: "Johnny Boy" }, + entity! { schema => id: "1", name: "Johnny Boy", vid: 5i64 }, + ), + ( + "2".to_owned(), + entity! { schema => id: "2", name: "Tessa", vid: 6i64 }, ), - ("2".to_owned(), entity! 
{ schema => id: "2", name: "Tessa" }), ]; transact_and_wait( &store.subgraph_store(), @@ -1300,9 +1317,13 @@ fn entity_changes_are_fired_and_forwarded_to_subscriptions() { TEST_BLOCK_1_PTR.clone(), added_entities .iter() - .map(|(id, data)| EntityOperation::Set { - key: USER_TYPE.parse_key(id.as_str()).unwrap(), - data: data.clone(), + .map(|(id, data)| { + let mut data = data.clone(); + data.set_vid_if_empty(); + EntityOperation::Set { + key: USER_TYPE.parse_key(id.as_str()).unwrap(), + data, + } }) .collect(), ) @@ -1310,7 +1331,7 @@ fn entity_changes_are_fired_and_forwarded_to_subscriptions() { .unwrap(); // Update an entity in the store - let updated_entity = entity! { schema => id: "1", name: "Johnny" }; + let updated_entity = entity! { schema => id: "1", name: "Johnny", vid: 7i64 }; let update_op = EntityOperation::Set { key: USER_TYPE.parse_key("1").unwrap(), data: updated_entity.clone(), @@ -1386,6 +1407,7 @@ fn throttle_subscription_delivers() { 120.7, false, None, + 7, ); transact_entity_operations( @@ -1431,6 +1453,7 @@ fn throttle_subscription_throttles() { 120.7, false, None, + 8, ); transact_entity_operations( @@ -1504,8 +1527,9 @@ fn handle_large_string_with_index() { name: &str, schema: &InputSchema, block: BlockNumber, + vid: i64, ) -> EntityModification { - let data = entity! { schema => id: id, name: name }; + let data = entity! 
{ schema => id: id, name: name, vid: vid }; let key = USER_TYPE.parse_key(id).unwrap(); @@ -1538,8 +1562,8 @@ fn handle_large_string_with_index() { BlockTime::for_test(&*TEST_BLOCK_3_PTR), FirehoseCursor::None, vec![ - make_insert_op(ONE, &long_text, &schema, block), - make_insert_op(TWO, &other_text, &schema, block), + make_insert_op(ONE, &long_text, &schema, block, 11), + make_insert_op(TWO, &other_text, &schema, block, 12), ], &stopwatch_metrics, Vec::new(), @@ -1550,6 +1574,7 @@ fn handle_large_string_with_index() { ) .await .expect("Failed to insert large text"); + writable.flush().await.unwrap(); let query = user_query() @@ -1603,8 +1628,9 @@ fn handle_large_bytea_with_index() { name: &[u8], schema: &InputSchema, block: BlockNumber, + vid: i64, ) -> EntityModification { - let data = entity! { schema => id: id, bin_name: scalar::Bytes::from(name) }; + let data = entity! { schema => id: id, bin_name: scalar::Bytes::from(name), vid: vid }; let key = USER_TYPE.parse_key(id).unwrap(); @@ -1642,8 +1668,8 @@ fn handle_large_bytea_with_index() { BlockTime::for_test(&*TEST_BLOCK_3_PTR), FirehoseCursor::None, vec![ - make_insert_op(ONE, &long_bytea, &schema, block), - make_insert_op(TWO, &other_bytea, &schema, block), + make_insert_op(ONE, &long_bytea, &schema, block, 10), + make_insert_op(TWO, &other_bytea, &schema, block, 11), ], &stopwatch_metrics, Vec::new(), @@ -1811,8 +1837,10 @@ fn window() { id: &str, color: &str, age: i32, + vid: i64, ) -> EntityOperation { - let entity = entity! { TEST_SUBGRAPH_SCHEMA => id: id, age: age, favorite_color: color }; + let entity = + entity! 
{ TEST_SUBGRAPH_SCHEMA => id: id, age: age, favorite_color: color, vid: vid }; EntityOperation::Set { key: entity_type.parse_key(id).unwrap(), @@ -1820,25 +1848,25 @@ fn window() { } } - fn make_user(id: &str, color: &str, age: i32) -> EntityOperation { - make_color_and_age(&*USER_TYPE, id, color, age) + fn make_user(id: &str, color: &str, age: i32, vid: i64) -> EntityOperation { + make_color_and_age(&*USER_TYPE, id, color, age, vid) } - fn make_person(id: &str, color: &str, age: i32) -> EntityOperation { - make_color_and_age(&*PERSON_TYPE, id, color, age) + fn make_person(id: &str, color: &str, age: i32, vid: i64) -> EntityOperation { + make_color_and_age(&*PERSON_TYPE, id, color, age, vid) } let ops = vec![ - make_user("4", "green", 34), - make_user("5", "green", 17), - make_user("6", "green", 41), - make_user("7", "red", 25), - make_user("8", "red", 45), - make_user("9", "yellow", 37), - make_user("10", "blue", 27), - make_user("11", "blue", 19), - make_person("p1", "green", 12), - make_person("p2", "red", 15), + make_user("4", "green", 34, 11), + make_user("5", "green", 17, 12), + make_user("6", "green", 41, 13), + make_user("7", "red", 25, 14), + make_user("8", "red", 45, 15), + make_user("9", "yellow", 37, 16), + make_user("10", "blue", 27, 17), + make_user("11", "blue", 19, 18), + make_person("p1", "green", 12, 19), + make_person("p2", "red", 15, 20), ]; run_test(|store, _, deployment| async move { @@ -2076,6 +2104,7 @@ fn reorg_tracking() { deployment: &DeploymentLocator, age: i32, block: &BlockPtr, + vid: i64, ) { let test_entity_1 = create_test_entity( "1", @@ -2086,6 +2115,7 @@ fn reorg_tracking() { 184.4, false, None, + vid, ); transact_and_wait(store, deployment, block.clone(), vec![test_entity_1]) .await @@ -2136,15 +2166,15 @@ fn reorg_tracking() { check_state!(store, 2, 2, 2); // Forward to block 3 - update_john(&subgraph_store, &deployment, 70, &TEST_BLOCK_3_PTR).await; + update_john(&subgraph_store, &deployment, 70, &TEST_BLOCK_3_PTR, 5).await; 
check_state!(store, 2, 2, 3); // Forward to block 4 - update_john(&subgraph_store, &deployment, 71, &TEST_BLOCK_4_PTR).await; + update_john(&subgraph_store, &deployment, 71, &TEST_BLOCK_4_PTR, 6).await; check_state!(store, 2, 2, 4); // Forward to block 5 - update_john(&subgraph_store, &deployment, 72, &TEST_BLOCK_5_PTR).await; + update_john(&subgraph_store, &deployment, 72, &TEST_BLOCK_5_PTR, 7).await; check_state!(store, 2, 2, 5); // Revert all the way back to block 2 diff --git a/store/test-store/tests/postgres/subgraph.rs b/store/test-store/tests/postgres/subgraph.rs index f52e8fa71f9..3065c8800ef 100644 --- a/store/test-store/tests/postgres/subgraph.rs +++ b/store/test-store/tests/postgres/subgraph.rs @@ -170,7 +170,7 @@ fn create_subgraph() { let manifest = SubgraphManifest:: { id, - spec_version: Version::new(1, 0, 0), + spec_version: Version::new(1, 3, 0), features: Default::default(), description: None, repository: None, @@ -547,7 +547,7 @@ fn subgraph_features() { } = get_subgraph_features(id.to_string()).unwrap(); assert_eq!(NAME, subgraph_id.as_str()); - assert_eq!("1.0.0", spec_version); + assert_eq!("1.3.0", spec_version); assert_eq!("1.0.0", api_version.unwrap()); assert_eq!(NETWORK_NAME, network); assert_eq!( diff --git a/store/test-store/tests/postgres/writable.rs b/store/test-store/tests/postgres/writable.rs index df04615898a..2e3e138d567 100644 --- a/store/test-store/tests/postgres/writable.rs +++ b/store/test-store/tests/postgres/writable.rs @@ -1,14 +1,17 @@ -use graph::blockchain::block_stream::FirehoseCursor; +use graph::blockchain::block_stream::{EntitySourceOperation, FirehoseCursor}; use graph::data::subgraph::schema::DeploymentCreate; use graph::data::value::Word; use graph::data_source::CausalityRegion; use graph::schema::{EntityKey, EntityType, InputSchema}; use lazy_static::lazy_static; -use std::collections::BTreeSet; +use std::collections::{BTreeMap, BTreeSet}; use std::marker::PhantomData; +use std::ops::Range; use test_store::*; 
-use graph::components::store::{DeploymentLocator, DerivedEntityQuery, WritableStore}; +use graph::components::store::{ + DeploymentLocator, DerivedEntityQuery, SourceableStore, WritableStore, +}; use graph::data::subgraph::*; use graph::semver::Version; use graph::{entity, prelude::*}; @@ -21,9 +24,40 @@ const SCHEMA_GQL: &str = " id: ID!, count: Int!, } + type Counter2 @entity(immutable: true) { + id: ID!, + count: Int!, + } + type BytesId @entity { + id: Bytes!, + value: String! + } + type Int8Id @entity { + id: Int8!, + value: String! + } + type StringId @entity { + id: String!, + value: String! + } + type PoolCreated @entity(immutable: true) { + id: Bytes!, + token0: Bytes!, + token1: Bytes!, + fee: Int!, + tickSpacing: Int!, + pool: Bytes!, + blockNumber: BigInt!, + blockTimestamp: BigInt!, + transactionHash: Bytes!, + transactionFrom: Bytes!, + transactionGasPrice: BigInt!, + logIndex: BigInt! + } "; const COUNTER: &str = "Counter"; +const COUNTER2: &str = "Counter2"; lazy_static! { static ref TEST_SUBGRAPH_ID_STRING: String = String::from("writableSubgraph"); @@ -33,6 +67,7 @@ lazy_static! { InputSchema::parse_latest(SCHEMA_GQL, TEST_SUBGRAPH_ID.clone()) .expect("Failed to parse user schema"); static ref COUNTER_TYPE: EntityType = TEST_SUBGRAPH_SCHEMA.entity_type(COUNTER).unwrap(); + static ref COUNTER2_TYPE: EntityType = TEST_SUBGRAPH_SCHEMA.entity_type(COUNTER2).unwrap(); } /// Inserts test data into the store. @@ -41,7 +76,7 @@ lazy_static! { async fn insert_test_data(store: Arc) -> DeploymentLocator { let manifest = SubgraphManifest:: { id: TEST_SUBGRAPH_ID.clone(), - spec_version: Version::new(1, 0, 0), + spec_version: Version::new(1, 3, 0), features: Default::default(), description: None, repository: None, @@ -80,7 +115,14 @@ fn remove_test_data(store: Arc) { /// Test harness for running database integration tests. 
fn run_test(test: F) where - F: FnOnce(Arc, Arc, DeploymentLocator) -> R + Send + 'static, + F: FnOnce( + Arc, + Arc, + Arc, + DeploymentLocator, + ) -> R + + Send + + 'static, R: std::future::Future + Send + 'static, { run_test_sequentially(|store| async move { @@ -95,10 +137,15 @@ where .writable(LOGGER.clone(), deployment.id, Arc::new(Vec::new())) .await .expect("we can get a writable store"); + let sourceable = store + .subgraph_store() + .sourceable(deployment.id) + .await + .expect("we can get a writable store"); // Run test and wait for the background writer to finish its work so // it won't conflict with the next test - test(store, writable, deployment).await; + test(store, writable, sourceable, deployment).await; }); } @@ -111,16 +158,47 @@ fn count_key(id: &str) -> EntityKey { COUNTER_TYPE.parse_key(id).unwrap() } -async fn insert_count(store: &Arc, deployment: &DeploymentLocator, count: u8) { +async fn insert_count( + store: &Arc, + deployment: &DeploymentLocator, + block: u8, + count: u8, + immutable_only: bool, +) { + let count_key_local = |counter_type: &EntityType, id: &str| counter_type.parse_key(id).unwrap(); let data = entity! { TEST_SUBGRAPH_SCHEMA => id: "1", - count: count as i32 + count: count as i32, + vid: block as i64, + }; + let entity_op = if block != 3 && block != 5 && block != 7 { + EntityOperation::Set { + key: count_key_local(&COUNTER_TYPE, &data.get("id").unwrap().to_string()), + data, + } + } else { + EntityOperation::Remove { + key: count_key_local(&COUNTER_TYPE, &data.get("id").unwrap().to_string()), + } }; - let entity_op = EntityOperation::Set { - key: count_key(&data.get("id").unwrap().to_string()), - data, + let mut ops = if immutable_only { + vec![] + } else { + vec![entity_op] }; - transact_entity_operations(store, deployment, block_pointer(count), vec![entity_op]) + if block < 6 { + let data = entity! 
{ TEST_SUBGRAPH_SCHEMA => + id: &block.to_string(), + count :count as i32, + vid: block as i64, + }; + let entity_op = EntityOperation::Set { + key: count_key_local(&COUNTER2_TYPE, &data.get("id").unwrap().to_string()), + data, + }; + ops.push(entity_op); + } + transact_entity_operations(store, deployment, block_pointer(block), ops) .await .unwrap(); } @@ -140,7 +218,7 @@ fn get_with_pending(batch: bool, read_count: F) where F: Send + Fn(&dyn WritableStore) -> i32 + Sync + 'static, { - run_test(move |store, writable, deployment| async move { + run_test(move |store, writable, _, deployment| async move { let subgraph_store = store.subgraph_store(); let read_count = || read_count(writable.as_ref()); @@ -150,13 +228,13 @@ where } for count in 1..4 { - insert_count(&subgraph_store, &deployment, count).await; + insert_count(&subgraph_store, &deployment, count, count, false).await; } // Test reading back with pending writes to the same entity pause_writer(&deployment).await; for count in 4..7 { - insert_count(&subgraph_store, &deployment, count).await; + insert_count(&subgraph_store, &deployment, count, count, false).await; } assert_eq!(6, read_count()); @@ -165,7 +243,7 @@ where // Test reading back with pending writes and a pending revert for count in 7..10 { - insert_count(&subgraph_store, &deployment, count).await; + insert_count(&subgraph_store, &deployment, count, count, false).await; } writable .revert_block_operations(block_pointer(2), FirehoseCursor::None) @@ -238,14 +316,14 @@ fn get_derived_nobatch() { #[test] fn restart() { - run_test(|store, writable, deployment| async move { + run_test(|store, writable, _, deployment| async move { let subgraph_store = store.subgraph_store(); let schema = subgraph_store.input_schema(&deployment.hash).unwrap(); // Cause an error by leaving out the non-nullable `count` attribute let entity_ops = vec![EntityOperation::Set { key: count_key("1"), - data: entity! { schema => id: "1" }, + data: entity! 
{ schema => id: "1", vid: 0i64}, }]; transact_entity_operations( &subgraph_store, @@ -269,7 +347,7 @@ fn restart() { // Retry our write with correct data let entity_ops = vec![EntityOperation::Set { key: count_key("1"), - data: entity! { schema => id: "1", count: 1 }, + data: entity! { schema => id: "1", count: 1, vid: 0i64}, }]; // `SubgraphStore` caches the correct writable so that this call // uses the restarted writable, and is equivalent to using @@ -286,3 +364,141 @@ fn restart() { writable.flush().await.unwrap(); }) } + +#[test] +fn read_range_test() { + run_test(|store, writable, sourceable, deployment| async move { + let result_entities = vec![ + r#"(1, [EntitySourceOperation { entity_op: Create, entity_type: EntityType(Counter), entity: Entity { count: Int(2), id: String("1"), vid: Int8(1) }, vid: 1 }, EntitySourceOperation { entity_op: Create, entity_type: EntityType(Counter2), entity: Entity { count: Int(2), id: String("1"), vid: Int8(1) }, vid: 1 }])"#, + r#"(2, [EntitySourceOperation { entity_op: Modify, entity_type: EntityType(Counter), entity: Entity { count: Int(4), id: String("1"), vid: Int8(2) }, vid: 2 }, EntitySourceOperation { entity_op: Create, entity_type: EntityType(Counter2), entity: Entity { count: Int(4), id: String("2"), vid: Int8(2) }, vid: 2 }])"#, + r#"(3, [EntitySourceOperation { entity_op: Delete, entity_type: EntityType(Counter), entity: Entity { count: Int(4), id: String("1"), vid: Int8(2) }, vid: 2 }, EntitySourceOperation { entity_op: Create, entity_type: EntityType(Counter2), entity: Entity { count: Int(6), id: String("3"), vid: Int8(3) }, vid: 3 }])"#, + r#"(4, [EntitySourceOperation { entity_op: Create, entity_type: EntityType(Counter), entity: Entity { count: Int(8), id: String("1"), vid: Int8(4) }, vid: 4 }, EntitySourceOperation { entity_op: Create, entity_type: EntityType(Counter2), entity: Entity { count: Int(8), id: String("4"), vid: Int8(4) }, vid: 4 }])"#, + r#"(5, [EntitySourceOperation { entity_op: Delete, 
entity_type: EntityType(Counter), entity: Entity { count: Int(8), id: String("1"), vid: Int8(4) }, vid: 4 }, EntitySourceOperation { entity_op: Create, entity_type: EntityType(Counter2), entity: Entity { count: Int(10), id: String("5"), vid: Int8(5) }, vid: 5 }])"#, + r#"(6, [EntitySourceOperation { entity_op: Create, entity_type: EntityType(Counter), entity: Entity { count: Int(12), id: String("1"), vid: Int8(6) }, vid: 6 }])"#, + r#"(7, [EntitySourceOperation { entity_op: Delete, entity_type: EntityType(Counter), entity: Entity { count: Int(12), id: String("1"), vid: Int8(6) }, vid: 6 }])"#, + ]; + let subgraph_store = store.subgraph_store(); + writable.deployment_synced(block_pointer(0)).unwrap(); + + for count in 1..=5 { + insert_count(&subgraph_store, &deployment, count, 2 * count, false).await; + } + writable.flush().await.unwrap(); + writable.deployment_synced(block_pointer(0)).unwrap(); + + let br: Range = 0..18; + let entity_types = vec![COUNTER_TYPE.clone(), COUNTER2_TYPE.clone()]; + let e: BTreeMap> = sourceable + .get_range(entity_types.clone(), CausalityRegion::ONCHAIN, br.clone()) + .unwrap(); + assert_eq!(e.len(), 5); + for en in &e { + let index = *en.0 - 1; + let a = result_entities[index as usize]; + assert_eq!(a, format!("{:?}", en)); + } + for count in 6..=7 { + insert_count(&subgraph_store, &deployment, count, 2 * count, false).await; + } + writable.flush().await.unwrap(); + writable.deployment_synced(block_pointer(0)).unwrap(); + let e: BTreeMap> = sourceable + .get_range(entity_types, CausalityRegion::ONCHAIN, br) + .unwrap(); + assert_eq!(e.len(), 7); + for en in &e { + let index = *en.0 - 1; + let a = result_entities[index as usize]; + assert_eq!(a, format!("{:?}", en)); + } + }) +} + +#[test] +fn read_immutable_only_range_test() { + run_test(|store, writable, sourceable, deployment| async move { + let subgraph_store = store.subgraph_store(); + writable.deployment_synced(block_pointer(0)).unwrap(); + + for count in 1..=4 { + 
insert_count(&subgraph_store, &deployment, count, 2 * count, true).await; + } + writable.flush().await.unwrap(); + writable.deployment_synced(block_pointer(0)).unwrap(); + let br: Range = 0..18; + let entity_types = vec![COUNTER2_TYPE.clone()]; + let e: BTreeMap> = sourceable + .get_range(entity_types.clone(), CausalityRegion::ONCHAIN, br.clone()) + .unwrap(); + assert_eq!(e.len(), 4); + }) +} + +#[test] +fn read_range_pool_created_test() { + run_test(|store, writable, sourceable, deployment| async move { + let result_entities = vec![ + format!("(1, [EntitySourceOperation {{ entity_op: Create, entity_type: EntityType(PoolCreated), entity: Entity {{ blockNumber: BigInt(12369621), blockTimestamp: BigInt(1620243254), fee: Int(500), id: Bytes(0xff80818283848586), logIndex: BigInt(0), pool: Bytes(0x8ad599c3a0ff1de082011efddc58f1908eb6e6d8), tickSpacing: Int(10), token0: Bytes(0xa0b86991c6218b36c1d19d4a2e9eb0ce3606eb48), token1: Bytes(0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2), transactionFrom: Bytes(0xa0b86991c6218b36c1d19d4a2e9eb0ce3606eb48), transactionGasPrice: BigInt(100000000000), transactionHash: Bytes(0x12340000000000000000000000000000000000000000000000000000000000000000000000000000), vid: Int8(1) }}, vid: 1 }}])"), + format!("(2, [EntitySourceOperation {{ entity_op: Create, entity_type: EntityType(PoolCreated), entity: Entity {{ blockNumber: BigInt(12369622), blockTimestamp: BigInt(1620243255), fee: Int(3000), id: Bytes(0xff90919293949596), logIndex: BigInt(1), pool: Bytes(0x4585fe77225b41b697c938b018e2ac67ac5a20c0), tickSpacing: Int(60), token0: Bytes(0x2260fac5e5542a773aa44fbcfedf7c193bc2c599), token1: Bytes(0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2), transactionFrom: Bytes(0x2260fac5e5542a773aa44fbcfedf7c193bc2c599), transactionGasPrice: BigInt(100000000000), transactionHash: Bytes(0x12340000000000000000000000000000000000000000000000000000000000000000000000000001), vid: Int8(2) }}, vid: 2 }}])"), + ]; + + // Rest of the test remains the same + let 
subgraph_store = store.subgraph_store(); + writable.deployment_synced(block_pointer(0)).unwrap(); + + let pool_created_type = TEST_SUBGRAPH_SCHEMA.entity_type("PoolCreated").unwrap(); + let entity_types = vec![pool_created_type.clone()]; + + for count in (1..=2).map(|x| x as i64) { + let id = if count == 1 { + "0xff80818283848586" + } else { + "0xff90919293949596" + }; + + let data = entity! { TEST_SUBGRAPH_SCHEMA => + id: id, + token0: if count == 1 { "0xa0b86991c6218b36c1d19d4a2e9eb0ce3606eb48" } else { "0x2260fac5e5542a773aa44fbcfedf7c193bc2c599" }, + token1: "0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2", + fee: if count == 1 { 500 } else { 3000 }, + tickSpacing: if count == 1 { 10 } else { 60 }, + pool: if count == 1 { "0x8ad599c3a0ff1de082011efddc58f1908eb6e6d8" } else { "0x4585fe77225b41b697c938b018e2ac67ac5a20c0" }, + blockNumber: 12369621 + count - 1, + blockTimestamp: 1620243254 + count - 1, + transactionHash: format!("0x1234{:0>76}", if count == 1 { "0" } else { "1" }), + transactionFrom: if count == 1 { "0xa0b86991c6218b36c1d19d4a2e9eb0ce3606eb48" } else { "0x2260fac5e5542a773aa44fbcfedf7c193bc2c599" }, + transactionGasPrice: 100000000000i64, + logIndex: count - 1, + vid: count + }; + + let key = pool_created_type.parse_key(id).unwrap(); + let op = EntityOperation::Set { + key: key.clone(), + data, + }; + + transact_entity_operations( + &subgraph_store, + &deployment, + block_pointer(count as u8), + vec![op], + ) + .await + .unwrap(); + } + writable.flush().await.unwrap(); + writable.deployment_synced(block_pointer(0)).unwrap(); + + let br: Range = 0..18; + let e: BTreeMap> = sourceable + .get_range(entity_types.clone(), CausalityRegion::ONCHAIN, br.clone()) + .unwrap(); + assert_eq!(e.len(), 2); + for en in &e { + let index = *en.0 - 1; + let a = result_entities[index as usize].clone(); + assert_eq!(a, format!("{:?}", en)); + } + }) +} diff --git a/substreams/substreams-trigger-filter/package.json b/substreams/substreams-trigger-filter/package.json index 
8f62b8cc51d..00b628b1e1b 100644 --- a/substreams/substreams-trigger-filter/package.json +++ b/substreams/substreams-trigger-filter/package.json @@ -1 +1 @@ -{ "dependencies": { "@graphprotocol/graph-cli": "^0.67.2" } } \ No newline at end of file +{ "dependencies": { "@graphprotocol/graph-cli": "^0.92.0" } } \ No newline at end of file diff --git a/tests/contracts/out/LimitedContract.sol/LimitedContract.json b/tests/contracts/out/LimitedContract.sol/LimitedContract.json index 8dae4d1f7ce..f853978ad6c 100644 --- a/tests/contracts/out/LimitedContract.sol/LimitedContract.json +++ b/tests/contracts/out/LimitedContract.sol/LimitedContract.json @@ -1,450 +1 @@ -{ - "abi": [ - { "type": "constructor", "inputs": [], "stateMutability": "nonpayable" }, - { - "type": "function", - "name": "inc", - "inputs": [ - { "name": "value", "type": "uint256", "internalType": "uint256" } - ], - "outputs": [{ "name": "", "type": "uint256", "internalType": "uint256" }], - "stateMutability": "pure" - }, - { "type": "event", "name": "Trigger", "inputs": [], "anonymous": false } - ], - "bytecode": { - "object": "0x608060405234801561001057600080fd5b506040517f3d53a39550e04688065827f3bb86584cb007ab9ebca7ebd528e7301c9c31eb5d90600090a1610120806100496000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c8063812600df14602d575b600080fd5b603c603836600460b2565b604e565b60405190815260200160405180910390f35b6000600a821060a35760405162461bcd60e51b815260206004820152601b60248201527f63616e206f6e6c792068616e646c652076616c756573203c2031300000000000604482015260640160405180910390fd5b60ac82600160ca565b92915050565b60006020828403121560c357600080fd5b5035919050565b8082018082111560ac57634e487b7160e01b600052601160045260246000fdfea264697066735822122045679e894d199dcf13e7f3e6d9816bf08cd9cceab355500d502bbfada548205f64736f6c63430008130033", - "sourceMap": "57:257:0:-:0;;;110:45;;;;;;;;;-1:-1:-1;139:9:0;;;;;;;57:257;;;;;;", - "linkReferences": {} - }, - "deployedBytecode": { - "object": 
"0x6080604052348015600f57600080fd5b506004361060285760003560e01c8063812600df14602d575b600080fd5b603c603836600460b2565b604e565b60405190815260200160405180910390f35b6000600a821060a35760405162461bcd60e51b815260206004820152601b60248201527f63616e206f6e6c792068616e646c652076616c756573203c2031300000000000604482015260640160405180910390fd5b60ac82600160ca565b92915050565b60006020828403121560c357600080fd5b5035919050565b8082018082111560ac57634e487b7160e01b600052601160045260246000fdfea264697066735822122045679e894d199dcf13e7f3e6d9816bf08cd9cceab355500d502bbfada548205f64736f6c63430008130033", - "sourceMap": "57:257:0:-:0;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;161:151;;;;;;:::i;:::-;;:::i;:::-;;;345:25:4;;;333:2;318:18;161:151:0;;;;;;;;210:7;245:2;237:5;:10;229:50;;;;-1:-1:-1;;;229:50:0;;583:2:4;229:50:0;;;565:21:4;622:2;602:18;;;595:30;661:29;641:18;;;634:57;708:18;;229:50:0;;;;;;;;296:9;:5;304:1;296:9;:::i;:::-;289:16;161:151;-1:-1:-1;;161:151:0:o;14:180:4:-;73:6;126:2;114:9;105:7;101:23;97:32;94:52;;;142:1;139;132:12;94:52;-1:-1:-1;165:23:4;;14:180;-1:-1:-1;14:180:4:o;737:222::-;802:9;;;823:10;;;820:133;;;875:10;870:3;866:20;863:1;856:31;910:4;907:1;900:15;938:4;935:1;928:15", - "linkReferences": {} - }, - "methodIdentifiers": { "inc(uint256)": "812600df" }, - "rawMetadata": 
"{\"compiler\":{\"version\":\"0.8.19+commit.7dd6d404\"},\"language\":\"Solidity\",\"output\":{\"abi\":[{\"inputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[],\"name\":\"Trigger\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"value\",\"type\":\"uint256\"}],\"name\":\"inc\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"pure\",\"type\":\"function\"}],\"devdoc\":{\"kind\":\"dev\",\"methods\":{},\"version\":1},\"userdoc\":{\"kind\":\"user\",\"methods\":{},\"version\":1}},\"settings\":{\"compilationTarget\":{\"src/LimitedContract.sol\":\"LimitedContract\"},\"evmVersion\":\"paris\",\"libraries\":{},\"metadata\":{\"bytecodeHash\":\"ipfs\"},\"optimizer\":{\"enabled\":true,\"runs\":200},\"remappings\":[]},\"sources\":{\"src/LimitedContract.sol\":{\"keccak256\":\"0x7b291e6c8d7562ba65f036bd8b25c87587c57f5c35d5a6ea587a4eb6c7de4b02\",\"license\":\"MIT\",\"urls\":[\"bzz-raw://b7b7d9ad73d3f266dff610553eac7a1454f71e616036b0b50cee8610b999c2eb\",\"dweb:/ipfs/QmcdMqSxkNDwHJ8pMyh2jK2sA6Xrk4VSdm4nqZ86EK2Vut\"]}},\"version\":1}", - "metadata": { - "compiler": { "version": "0.8.19+commit.7dd6d404" }, - "language": "Solidity", - "output": { - "abi": [ - { - "inputs": [], - "stateMutability": "nonpayable", - "type": "constructor" - }, - { - "inputs": [], - "type": "event", - "name": "Trigger", - "anonymous": false - }, - { - "inputs": [ - { "internalType": "uint256", "name": "value", "type": "uint256" } - ], - "stateMutability": "pure", - "type": "function", - "name": "inc", - "outputs": [ - { "internalType": "uint256", "name": "", "type": "uint256" } - ] - } - ], - "devdoc": { "kind": "dev", "methods": {}, "version": 1 }, - "userdoc": { "kind": "user", "methods": {}, "version": 1 } - }, - "settings": { - "remappings": [], - "optimizer": { "enabled": true, "runs": 200 }, - "metadata": { "bytecodeHash": "ipfs" }, - "compilationTarget": { 
"src/LimitedContract.sol": "LimitedContract" }, - "evmVersion": "paris", - "libraries": {} - }, - "sources": { - "src/LimitedContract.sol": { - "keccak256": "0x7b291e6c8d7562ba65f036bd8b25c87587c57f5c35d5a6ea587a4eb6c7de4b02", - "urls": [ - "bzz-raw://b7b7d9ad73d3f266dff610553eac7a1454f71e616036b0b50cee8610b999c2eb", - "dweb:/ipfs/QmcdMqSxkNDwHJ8pMyh2jK2sA6Xrk4VSdm4nqZ86EK2Vut" - ], - "license": "MIT" - } - }, - "version": 1 - }, - "ast": { - "absolutePath": "src/LimitedContract.sol", - "id": 31, - "exportedSymbols": { "LimitedContract": [30] }, - "nodeType": "SourceUnit", - "src": "32:283:0", - "nodes": [ - { - "id": 1, - "nodeType": "PragmaDirective", - "src": "32:23:0", - "nodes": [], - "literals": ["solidity", "^", "0.8", ".0"] - }, - { - "id": 30, - "nodeType": "ContractDefinition", - "src": "57:257:0", - "nodes": [ - { - "id": 3, - "nodeType": "EventDefinition", - "src": "88:16:0", - "nodes": [], - "anonymous": false, - "eventSelector": "3d53a39550e04688065827f3bb86584cb007ab9ebca7ebd528e7301c9c31eb5d", - "name": "Trigger", - "nameLocation": "94:7:0", - "parameters": { - "id": 2, - "nodeType": "ParameterList", - "parameters": [], - "src": "101:2:0" - } - }, - { - "id": 10, - "nodeType": "FunctionDefinition", - "src": "110:45:0", - "nodes": [], - "body": { - "id": 9, - "nodeType": "Block", - "src": "124:31:0", - "nodes": [], - "statements": [ - { - "eventCall": { - "arguments": [], - "expression": { - "argumentTypes": [], - "id": 6, - "name": "Trigger", - "nodeType": "Identifier", - "overloadedDeclarations": [], - "referencedDeclaration": 3, - "src": "139:7:0", - "typeDescriptions": { - "typeIdentifier": "t_function_event_nonpayable$__$returns$__$", - "typeString": "function ()" - } - }, - "id": 7, - "isConstant": false, - "isLValue": false, - "isPure": false, - "kind": "functionCall", - "lValueRequested": false, - "nameLocations": [], - "names": [], - "nodeType": "FunctionCall", - "src": "139:9:0", - "tryCall": false, - "typeDescriptions": { - 
"typeIdentifier": "t_tuple$__$", - "typeString": "tuple()" - } - }, - "id": 8, - "nodeType": "EmitStatement", - "src": "134:14:0" - } - ] - }, - "implemented": true, - "kind": "constructor", - "modifiers": [], - "name": "", - "nameLocation": "-1:-1:-1", - "parameters": { - "id": 4, - "nodeType": "ParameterList", - "parameters": [], - "src": "121:2:0" - }, - "returnParameters": { - "id": 5, - "nodeType": "ParameterList", - "parameters": [], - "src": "124:0:0" - }, - "scope": 30, - "stateMutability": "nonpayable", - "virtual": false, - "visibility": "public" - }, - { - "id": 29, - "nodeType": "FunctionDefinition", - "src": "161:151:0", - "nodes": [], - "body": { - "id": 28, - "nodeType": "Block", - "src": "219:93:0", - "nodes": [], - "statements": [ - { - "expression": { - "arguments": [ - { - "commonType": { - "typeIdentifier": "t_uint256", - "typeString": "uint256" - }, - "id": 20, - "isConstant": false, - "isLValue": false, - "isPure": false, - "lValueRequested": false, - "leftExpression": { - "id": 18, - "name": "value", - "nodeType": "Identifier", - "overloadedDeclarations": [], - "referencedDeclaration": 12, - "src": "237:5:0", - "typeDescriptions": { - "typeIdentifier": "t_uint256", - "typeString": "uint256" - } - }, - "nodeType": "BinaryOperation", - "operator": "<", - "rightExpression": { - "hexValue": "3130", - "id": 19, - "isConstant": false, - "isLValue": false, - "isPure": true, - "kind": "number", - "lValueRequested": false, - "nodeType": "Literal", - "src": "245:2:0", - "typeDescriptions": { - "typeIdentifier": "t_rational_10_by_1", - "typeString": "int_const 10" - }, - "value": "10" - }, - "src": "237:10:0", - "typeDescriptions": { - "typeIdentifier": "t_bool", - "typeString": "bool" - } - }, - { - "hexValue": "63616e206f6e6c792068616e646c652076616c756573203c203130", - "id": 21, - "isConstant": false, - "isLValue": false, - "isPure": true, - "kind": "string", - "lValueRequested": false, - "nodeType": "Literal", - "src": "249:29:0", - 
"typeDescriptions": { - "typeIdentifier": "t_stringliteral_578cd1fc098748633f5d7d46bba428bb3129c1e63324f2b7151699cae5146449", - "typeString": "literal_string \"can only handle values < 10\"" - }, - "value": "can only handle values < 10" - } - ], - "expression": { - "argumentTypes": [ - { "typeIdentifier": "t_bool", "typeString": "bool" }, - { - "typeIdentifier": "t_stringliteral_578cd1fc098748633f5d7d46bba428bb3129c1e63324f2b7151699cae5146449", - "typeString": "literal_string \"can only handle values < 10\"" - } - ], - "id": 17, - "name": "require", - "nodeType": "Identifier", - "overloadedDeclarations": [-18, -18], - "referencedDeclaration": -18, - "src": "229:7:0", - "typeDescriptions": { - "typeIdentifier": "t_function_require_pure$_t_bool_$_t_string_memory_ptr_$returns$__$", - "typeString": "function (bool,string memory) pure" - } - }, - "id": 22, - "isConstant": false, - "isLValue": false, - "isPure": false, - "kind": "functionCall", - "lValueRequested": false, - "nameLocations": [], - "names": [], - "nodeType": "FunctionCall", - "src": "229:50:0", - "tryCall": false, - "typeDescriptions": { - "typeIdentifier": "t_tuple$__$", - "typeString": "tuple()" - } - }, - "id": 23, - "nodeType": "ExpressionStatement", - "src": "229:50:0" - }, - { - "expression": { - "commonType": { - "typeIdentifier": "t_uint256", - "typeString": "uint256" - }, - "id": 26, - "isConstant": false, - "isLValue": false, - "isPure": false, - "lValueRequested": false, - "leftExpression": { - "id": 24, - "name": "value", - "nodeType": "Identifier", - "overloadedDeclarations": [], - "referencedDeclaration": 12, - "src": "296:5:0", - "typeDescriptions": { - "typeIdentifier": "t_uint256", - "typeString": "uint256" - } - }, - "nodeType": "BinaryOperation", - "operator": "+", - "rightExpression": { - "hexValue": "31", - "id": 25, - "isConstant": false, - "isLValue": false, - "isPure": true, - "kind": "number", - "lValueRequested": false, - "nodeType": "Literal", - "src": "304:1:0", - 
"typeDescriptions": { - "typeIdentifier": "t_rational_1_by_1", - "typeString": "int_const 1" - }, - "value": "1" - }, - "src": "296:9:0", - "typeDescriptions": { - "typeIdentifier": "t_uint256", - "typeString": "uint256" - } - }, - "functionReturnParameters": 16, - "id": 27, - "nodeType": "Return", - "src": "289:16:0" - } - ] - }, - "functionSelector": "812600df", - "implemented": true, - "kind": "function", - "modifiers": [], - "name": "inc", - "nameLocation": "170:3:0", - "parameters": { - "id": 13, - "nodeType": "ParameterList", - "parameters": [ - { - "constant": false, - "id": 12, - "mutability": "mutable", - "name": "value", - "nameLocation": "182:5:0", - "nodeType": "VariableDeclaration", - "scope": 29, - "src": "174:13:0", - "stateVariable": false, - "storageLocation": "default", - "typeDescriptions": { - "typeIdentifier": "t_uint256", - "typeString": "uint256" - }, - "typeName": { - "id": 11, - "name": "uint256", - "nodeType": "ElementaryTypeName", - "src": "174:7:0", - "typeDescriptions": { - "typeIdentifier": "t_uint256", - "typeString": "uint256" - } - }, - "visibility": "internal" - } - ], - "src": "173:15:0" - }, - "returnParameters": { - "id": 16, - "nodeType": "ParameterList", - "parameters": [ - { - "constant": false, - "id": 15, - "mutability": "mutable", - "name": "", - "nameLocation": "-1:-1:-1", - "nodeType": "VariableDeclaration", - "scope": 29, - "src": "210:7:0", - "stateVariable": false, - "storageLocation": "default", - "typeDescriptions": { - "typeIdentifier": "t_uint256", - "typeString": "uint256" - }, - "typeName": { - "id": 14, - "name": "uint256", - "nodeType": "ElementaryTypeName", - "src": "210:7:0", - "typeDescriptions": { - "typeIdentifier": "t_uint256", - "typeString": "uint256" - } - }, - "visibility": "internal" - } - ], - "src": "209:9:0" - }, - "scope": 30, - "stateMutability": "pure", - "virtual": false, - "visibility": "public" - } - ], - "abstract": false, - "baseContracts": [], - "canonicalName": "LimitedContract", - 
"contractDependencies": [], - "contractKind": "contract", - "fullyImplemented": true, - "linearizedBaseContracts": [30], - "name": "LimitedContract", - "nameLocation": "66:15:0", - "scope": 31, - "usedErrors": [] - } - ], - "license": "MIT" - }, - "id": 0 -} +{"abi":[{"type":"constructor","inputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"inc","inputs":[{"name":"value","type":"uint256","internalType":"uint256"}],"outputs":[{"name":"","type":"uint256","internalType":"uint256"}],"stateMutability":"pure"},{"type":"event","name":"Trigger","inputs":[],"anonymous":false}],"bytecode":{"object":"0x608060405234801561001057600080fd5b506040517f3d53a39550e04688065827f3bb86584cb007ab9ebca7ebd528e7301c9c31eb5d90600090a1610120806100496000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c8063812600df14602d575b600080fd5b603c603836600460b2565b604e565b60405190815260200160405180910390f35b6000600a821060a35760405162461bcd60e51b815260206004820152601b60248201527f63616e206f6e6c792068616e646c652076616c756573203c2031300000000000604482015260640160405180910390fd5b60ac82600160ca565b92915050565b60006020828403121560c357600080fd5b5035919050565b8082018082111560ac57634e487b7160e01b600052601160045260246000fdfea264697066735822122045679e894d199dcf13e7f3e6d9816bf08cd9cceab355500d502bbfada548205f64736f6c63430008130033","sourceMap":"57:257:0:-:0;;;110:45;;;;;;;;;-1:-1:-1;139:9:0;;;;;;;57:257;;;;;;","linkReferences":{}},"deployedBytecode":{"object":"0x6080604052348015600f57600080fd5b506004361060285760003560e01c8063812600df14602d575b600080fd5b603c603836600460b2565b604e565b60405190815260200160405180910390f35b6000600a821060a35760405162461bcd60e51b815260206004820152601b60248201527f63616e206f6e6c792068616e646c652076616c756573203c2031300000000000604482015260640160405180910390fd5b60ac82600160ca565b92915050565b60006020828403121560c357600080fd5b5035919050565b8082018082111560ac57634e487b7160e01b600052601160045260246000fdfea264697066735822122045679e894d199dcf13e7f3e6d9816bf08cd9
cceab355500d502bbfada548205f64736f6c63430008130033","sourceMap":"57:257:0:-:0;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;161:151;;;;;;:::i;:::-;;:::i;:::-;;;345:25:4;;;333:2;318:18;161:151:0;;;;;;;;210:7;245:2;237:5;:10;229:50;;;;-1:-1:-1;;;229:50:0;;583:2:4;229:50:0;;;565:21:4;622:2;602:18;;;595:30;661:29;641:18;;;634:57;708:18;;229:50:0;;;;;;;;296:9;:5;304:1;296:9;:::i;:::-;289:16;161:151;-1:-1:-1;;161:151:0:o;14:180:4:-;73:6;126:2;114:9;105:7;101:23;97:32;94:52;;;142:1;139;132:12;94:52;-1:-1:-1;165:23:4;;14:180;-1:-1:-1;14:180:4:o;737:222::-;802:9;;;823:10;;;820:133;;;875:10;870:3;866:20;863:1;856:31;910:4;907:1;900:15;938:4;935:1;928:15","linkReferences":{}},"methodIdentifiers":{"inc(uint256)":"812600df"},"rawMetadata":"{\"compiler\":{\"version\":\"0.8.19+commit.7dd6d404\"},\"language\":\"Solidity\",\"output\":{\"abi\":[{\"inputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[],\"name\":\"Trigger\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"value\",\"type\":\"uint256\"}],\"name\":\"inc\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"pure\",\"type\":\"function\"}],\"devdoc\":{\"kind\":\"dev\",\"methods\":{},\"version\":1},\"userdoc\":{\"kind\":\"user\",\"methods\":{},\"version\":1}},\"settings\":{\"compilationTarget\":{\"src/LimitedContract.sol\":\"LimitedContract\"},\"evmVersion\":\"paris\",\"libraries\":{},\"metadata\":{\"bytecodeHash\":\"ipfs\"},\"optimizer\":{\"enabled\":true,\"runs\":200},\"remappings\":[]},\"sources\":{\"src/LimitedContract.sol\":{\"keccak256\":\"0x7b291e6c8d7562ba65f036bd8b25c87587c57f5c35d5a6ea587a4eb6c7de4b02\",\"license\":\"MIT\",\"urls\":[\"bzz-raw://b7b7d9ad73d3f266dff610553eac7a1454f71e616036b0b50cee8610b999c2eb\",\"dweb:/ipfs/QmcdMqSxkNDwHJ8pMyh2jK2sA6Xrk4VSdm4nqZ86EK2Vut\"]}},\"version\":1}","metadata":{"compiler":{"version":"0.8.19+commit.7dd6d404"},"language":"Solidity","output":{"abi":[{"inputs":[],"state
Mutability":"nonpayable","type":"constructor"},{"inputs":[],"type":"event","name":"Trigger","anonymous":false},{"inputs":[{"internalType":"uint256","name":"value","type":"uint256"}],"stateMutability":"pure","type":"function","name":"inc","outputs":[{"internalType":"uint256","name":"","type":"uint256"}]}],"devdoc":{"kind":"dev","methods":{},"version":1},"userdoc":{"kind":"user","methods":{},"version":1}},"settings":{"remappings":[],"optimizer":{"enabled":true,"runs":200},"metadata":{"bytecodeHash":"ipfs"},"compilationTarget":{"src/LimitedContract.sol":"LimitedContract"},"evmVersion":"paris","libraries":{}},"sources":{"src/LimitedContract.sol":{"keccak256":"0x7b291e6c8d7562ba65f036bd8b25c87587c57f5c35d5a6ea587a4eb6c7de4b02","urls":["bzz-raw://b7b7d9ad73d3f266dff610553eac7a1454f71e616036b0b50cee8610b999c2eb","dweb:/ipfs/QmcdMqSxkNDwHJ8pMyh2jK2sA6Xrk4VSdm4nqZ86EK2Vut"],"license":"MIT"}},"version":1},"ast":{"absolutePath":"src/LimitedContract.sol","id":31,"exportedSymbols":{"LimitedContract":[30]},"nodeType":"SourceUnit","src":"32:283:0","nodes":[{"id":1,"nodeType":"PragmaDirective","src":"32:23:0","nodes":[],"literals":["solidity","^","0.8",".0"]},{"id":30,"nodeType":"ContractDefinition","src":"57:257:0","nodes":[{"id":3,"nodeType":"EventDefinition","src":"88:16:0","nodes":[],"anonymous":false,"eventSelector":"3d53a39550e04688065827f3bb86584cb007ab9ebca7ebd528e7301c9c31eb5d","name":"Trigger","nameLocation":"94:7:0","parameters":{"id":2,"nodeType":"ParameterList","parameters":[],"src":"101:2:0"}},{"id":10,"nodeType":"FunctionDefinition","src":"110:45:0","nodes":[],"body":{"id":9,"nodeType":"Block","src":"124:31:0","nodes":[],"statements":[{"eventCall":{"arguments":[],"expression":{"argumentTypes":[],"id":6,"name":"Trigger","nodeType":"Identifier","overloadedDeclarations":[],"referencedDeclaration":3,"src":"139:7:0","typeDescriptions":{"typeIdentifier":"t_function_event_nonpayable$__$returns$__$","typeString":"function 
()"}},"id":7,"isConstant":false,"isLValue":false,"isPure":false,"kind":"functionCall","lValueRequested":false,"nameLocations":[],"names":[],"nodeType":"FunctionCall","src":"139:9:0","tryCall":false,"typeDescriptions":{"typeIdentifier":"t_tuple$__$","typeString":"tuple()"}},"id":8,"nodeType":"EmitStatement","src":"134:14:0"}]},"implemented":true,"kind":"constructor","modifiers":[],"name":"","nameLocation":"-1:-1:-1","parameters":{"id":4,"nodeType":"ParameterList","parameters":[],"src":"121:2:0"},"returnParameters":{"id":5,"nodeType":"ParameterList","parameters":[],"src":"124:0:0"},"scope":30,"stateMutability":"nonpayable","virtual":false,"visibility":"public"},{"id":29,"nodeType":"FunctionDefinition","src":"161:151:0","nodes":[],"body":{"id":28,"nodeType":"Block","src":"219:93:0","nodes":[],"statements":[{"expression":{"arguments":[{"commonType":{"typeIdentifier":"t_uint256","typeString":"uint256"},"id":20,"isConstant":false,"isLValue":false,"isPure":false,"lValueRequested":false,"leftExpression":{"id":18,"name":"value","nodeType":"Identifier","overloadedDeclarations":[],"referencedDeclaration":12,"src":"237:5:0","typeDescriptions":{"typeIdentifier":"t_uint256","typeString":"uint256"}},"nodeType":"BinaryOperation","operator":"<","rightExpression":{"hexValue":"3130","id":19,"isConstant":false,"isLValue":false,"isPure":true,"kind":"number","lValueRequested":false,"nodeType":"Literal","src":"245:2:0","typeDescriptions":{"typeIdentifier":"t_rational_10_by_1","typeString":"int_const 10"},"value":"10"},"src":"237:10:0","typeDescriptions":{"typeIdentifier":"t_bool","typeString":"bool"}},{"hexValue":"63616e206f6e6c792068616e646c652076616c756573203c203130","id":21,"isConstant":false,"isLValue":false,"isPure":true,"kind":"string","lValueRequested":false,"nodeType":"Literal","src":"249:29:0","typeDescriptions":{"typeIdentifier":"t_stringliteral_578cd1fc098748633f5d7d46bba428bb3129c1e63324f2b7151699cae5146449","typeString":"literal_string \"can only handle values < 
10\""},"value":"can only handle values < 10"}],"expression":{"argumentTypes":[{"typeIdentifier":"t_bool","typeString":"bool"},{"typeIdentifier":"t_stringliteral_578cd1fc098748633f5d7d46bba428bb3129c1e63324f2b7151699cae5146449","typeString":"literal_string \"can only handle values < 10\""}],"id":17,"name":"require","nodeType":"Identifier","overloadedDeclarations":[-18,-18],"referencedDeclaration":-18,"src":"229:7:0","typeDescriptions":{"typeIdentifier":"t_function_require_pure$_t_bool_$_t_string_memory_ptr_$returns$__$","typeString":"function (bool,string memory) pure"}},"id":22,"isConstant":false,"isLValue":false,"isPure":false,"kind":"functionCall","lValueRequested":false,"nameLocations":[],"names":[],"nodeType":"FunctionCall","src":"229:50:0","tryCall":false,"typeDescriptions":{"typeIdentifier":"t_tuple$__$","typeString":"tuple()"}},"id":23,"nodeType":"ExpressionStatement","src":"229:50:0"},{"expression":{"commonType":{"typeIdentifier":"t_uint256","typeString":"uint256"},"id":26,"isConstant":false,"isLValue":false,"isPure":false,"lValueRequested":false,"leftExpression":{"id":24,"name":"value","nodeType":"Identifier","overloadedDeclarations":[],"referencedDeclaration":12,"src":"296:5:0","typeDescriptions":{"typeIdentifier":"t_uint256","typeString":"uint256"}},"nodeType":"BinaryOperation","operator":"+","rightExpression":{"hexValue":"31","id":25,"isConstant":false,"isLValue":false,"isPure":true,"kind":"number","lValueRequested":false,"nodeType":"Literal","src":"304:1:0","typeDescriptions":{"typeIdentifier":"t_rational_1_by_1","typeString":"int_const 
1"},"value":"1"},"src":"296:9:0","typeDescriptions":{"typeIdentifier":"t_uint256","typeString":"uint256"}},"functionReturnParameters":16,"id":27,"nodeType":"Return","src":"289:16:0"}]},"functionSelector":"812600df","implemented":true,"kind":"function","modifiers":[],"name":"inc","nameLocation":"170:3:0","parameters":{"id":13,"nodeType":"ParameterList","parameters":[{"constant":false,"id":12,"mutability":"mutable","name":"value","nameLocation":"182:5:0","nodeType":"VariableDeclaration","scope":29,"src":"174:13:0","stateVariable":false,"storageLocation":"default","typeDescriptions":{"typeIdentifier":"t_uint256","typeString":"uint256"},"typeName":{"id":11,"name":"uint256","nodeType":"ElementaryTypeName","src":"174:7:0","typeDescriptions":{"typeIdentifier":"t_uint256","typeString":"uint256"}},"visibility":"internal"}],"src":"173:15:0"},"returnParameters":{"id":16,"nodeType":"ParameterList","parameters":[{"constant":false,"id":15,"mutability":"mutable","name":"","nameLocation":"-1:-1:-1","nodeType":"VariableDeclaration","scope":29,"src":"210:7:0","stateVariable":false,"storageLocation":"default","typeDescriptions":{"typeIdentifier":"t_uint256","typeString":"uint256"},"typeName":{"id":14,"name":"uint256","nodeType":"ElementaryTypeName","src":"210:7:0","typeDescriptions":{"typeIdentifier":"t_uint256","typeString":"uint256"}},"visibility":"internal"}],"src":"209:9:0"},"scope":30,"stateMutability":"pure","virtual":false,"visibility":"public"}],"abstract":false,"baseContracts":[],"canonicalName":"LimitedContract","contractDependencies":[],"contractKind":"contract","fullyImplemented":true,"linearizedBaseContracts":[30],"name":"LimitedContract","nameLocation":"66:15:0","scope":31,"usedErrors":[]}],"license":"MIT"},"id":0} \ No newline at end of file diff --git a/tests/contracts/out/OverloadedContract.sol/OverloadedContract.json b/tests/contracts/out/OverloadedContract.sol/OverloadedContract.json index 6d14e1951d4..5c4fc74d7cf 100644 --- 
a/tests/contracts/out/OverloadedContract.sol/OverloadedContract.json +++ b/tests/contracts/out/OverloadedContract.sol/OverloadedContract.json @@ -1,583 +1 @@ -{ - "abi": [ - { "type": "constructor", "inputs": [], "stateMutability": "nonpayable" }, - { - "type": "function", - "name": "exampleFunction", - "inputs": [{ "name": "", "type": "bytes32", "internalType": "bytes32" }], - "outputs": [{ "name": "", "type": "uint256", "internalType": "uint256" }], - "stateMutability": "pure" - }, - { - "type": "function", - "name": "exampleFunction", - "inputs": [{ "name": "", "type": "uint256", "internalType": "uint256" }], - "outputs": [{ "name": "", "type": "string", "internalType": "string" }], - "stateMutability": "pure" - }, - { - "type": "function", - "name": "exampleFunction", - "inputs": [{ "name": "", "type": "string", "internalType": "string" }], - "outputs": [{ "name": "", "type": "string", "internalType": "string" }], - "stateMutability": "pure" - }, - { "type": "event", "name": "Trigger", "inputs": [], "anonymous": false } - ], - "bytecode": { - "object": 
"0x608060405234801561001057600080fd5b506040517f3d53a39550e04688065827f3bb86584cb007ab9ebca7ebd528e7301c9c31eb5d90600090a1610252806100496000396000f3fe608060405234801561001057600080fd5b50600436106100415760003560e01c806331870cbc14610046578063934bc29d1461006e578063bc2d73ba146100b5575b600080fd5b61005b6100543660046100ee565b5061010090565b6040519081526020015b60405180910390f35b6100a861007c3660046100ee565b5060408051808201909152601181527075696e74323536202d3e20737472696e6760781b602082015290565b6040516100659190610107565b6100a86100c336600461016b565b5060408051808201909152601081526f737472696e67202d3e20737472696e6760801b602082015290565b60006020828403121561010057600080fd5b5035919050565b600060208083528351808285015260005b8181101561013457858101830151858201604001528201610118565b506000604082860101526040601f19601f8301168501019250505092915050565b634e487b7160e01b600052604160045260246000fd5b60006020828403121561017d57600080fd5b813567ffffffffffffffff8082111561019557600080fd5b818401915084601f8301126101a957600080fd5b8135818111156101bb576101bb610155565b604051601f8201601f19908116603f011681019083821181831017156101e3576101e3610155565b816040528281528760208487010111156101fc57600080fd5b82602086016020830137600092810160200192909252509594505050505056fea2646970667358221220d7abec9e326f4c25cc8f45f8ee265c92b595b8cf7f1d5a1d863735dee11ed7d064736f6c63430008130033", - "sourceMap": "57:457:1:-:0;;;113:45;;;;;;;;;-1:-1:-1;142:9:1;;;;;;;57:457;;;;;;", - "linkReferences": {} - }, - "deployedBytecode": { - "object": 
"0x608060405234801561001057600080fd5b50600436106100415760003560e01c806331870cbc14610046578063934bc29d1461006e578063bc2d73ba146100b5575b600080fd5b61005b6100543660046100ee565b5061010090565b6040519081526020015b60405180910390f35b6100a861007c3660046100ee565b5060408051808201909152601181527075696e74323536202d3e20737472696e6760781b602082015290565b6040516100659190610107565b6100a86100c336600461016b565b5060408051808201909152601081526f737472696e67202d3e20737472696e6760801b602082015290565b60006020828403121561010057600080fd5b5035919050565b600060208083528351808285015260005b8181101561013457858101830151858201604001528201610118565b506000604082860101526040601f19601f8301168501019250505092915050565b634e487b7160e01b600052604160045260246000fd5b60006020828403121561017d57600080fd5b813567ffffffffffffffff8082111561019557600080fd5b818401915084601f8301126101a957600080fd5b8135818111156101bb576101bb610155565b604051601f8201601f19908116603f011681019083821181831017156101e3576101e3610155565b816040528281528760208487010111156101fc57600080fd5b82602086016020830137600092810160200192909252509594505050505056fea2646970667358221220d7abec9e326f4c25cc8f45f8ee265c92b595b8cf7f1d5a1d863735dee11ed7d064736f6c63430008130033", - "sourceMap": 
"57:457:1:-:0;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;421:91;;;;;;:::i;:::-;-1:-1:-1;502:3:1;;421:91;;;;345:25:4;;;333:2;318:18;421:91:1;;;;;;;;302:113;;;;;;:::i;:::-;-1:-1:-1;382:26:1;;;;;;;;;;;;-1:-1:-1;;;382:26:1;;;;;302:113;;;;;;;;:::i;164:132::-;;;;;;:::i;:::-;-1:-1:-1;264:25:1;;;;;;;;;;;;-1:-1:-1;;;264:25:1;;;;;164:132;14:180:4;73:6;126:2;114:9;105:7;101:23;97:32;94:52;;;142:1;139;132:12;94:52;-1:-1:-1;165:23:4;;14:180;-1:-1:-1;14:180:4:o;566:548::-;678:4;707:2;736;725:9;718:21;768:6;762:13;811:6;806:2;795:9;791:18;784:34;836:1;846:140;860:6;857:1;854:13;846:140;;;955:14;;;951:23;;945:30;921:17;;;940:2;917:26;910:66;875:10;;846:140;;;850:3;1035:1;1030:2;1021:6;1010:9;1006:22;1002:31;995:42;1105:2;1098;1094:7;1089:2;1081:6;1077:15;1073:29;1062:9;1058:45;1054:54;1046:62;;;;566:548;;;;:::o;1119:127::-;1180:10;1175:3;1171:20;1168:1;1161:31;1211:4;1208:1;1201:15;1235:4;1232:1;1225:15;1251:922;1320:6;1373:2;1361:9;1352:7;1348:23;1344:32;1341:52;;;1389:1;1386;1379:12;1341:52;1429:9;1416:23;1458:18;1499:2;1491:6;1488:14;1485:34;;;1515:1;1512;1505:12;1485:34;1553:6;1542:9;1538:22;1528:32;;1598:7;1591:4;1587:2;1583:13;1579:27;1569:55;;1620:1;1617;1610:12;1569:55;1656:2;1643:16;1678:2;1674;1671:10;1668:36;;;1684:18;;:::i;:::-;1759:2;1753:9;1727:2;1813:13;;-1:-1:-1;;1809:22:4;;;1833:2;1805:31;1801:40;1789:53;;;1857:18;;;1877:22;;;1854:46;1851:72;;;1903:18;;:::i;:::-;1943:10;1939:2;1932:22;1978:2;1970:6;1963:18;2018:7;2013:2;2008;2004;2000:11;1996:20;1993:33;1990:53;;;2039:1;2036;2029:12;1990:53;2095:2;2090;2086;2082:11;2077:2;2069:6;2065:15;2052:46;2140:1;2118:15;;;2135:2;2114:24;2107:35;;;;-1:-1:-1;2122:6:4;1251:922;-1:-1:-1;;;;;1251:922:4:o", - "linkReferences": {} - }, - "methodIdentifiers": { - "exampleFunction(bytes32)": "31870cbc", - "exampleFunction(string)": "bc2d73ba", - "exampleFunction(uint256)": "934bc29d" - }, - "rawMetadata": 
"{\"compiler\":{\"version\":\"0.8.19+commit.7dd6d404\"},\"language\":\"Solidity\",\"output\":{\"abi\":[{\"inputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[],\"name\":\"Trigger\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"name\":\"exampleFunction\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"exampleFunction\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"name\":\"exampleFunction\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"pure\",\"type\":\"function\"}],\"devdoc\":{\"kind\":\"dev\",\"methods\":{},\"version\":1},\"userdoc\":{\"kind\":\"user\",\"methods\":{},\"version\":1}},\"settings\":{\"compilationTarget\":{\"src/OverloadedContract.sol\":\"OverloadedContract\"},\"evmVersion\":\"paris\",\"libraries\":{},\"metadata\":{\"bytecodeHash\":\"ipfs\"},\"optimizer\":{\"enabled\":true,\"runs\":200},\"remappings\":[]},\"sources\":{\"src/OverloadedContract.sol\":{\"keccak256\":\"0xc6734859398f3be8468d6e6c7fd8b03a52243223799ce17d5e4ab9d9aca1fc45\",\"license\":\"MIT\",\"urls\":[\"bzz-raw://2c860b9cd7d0a2086e164ce38a2aa24a5b7f681bb575a5a656f732d3742761be\",\"dweb:/ipfs/QmPwazDSTPrNpVrRY2vunso7VXunWp5dn1641TzxK9eZfe\"]}},\"version\":1}", - "metadata": { - "compiler": { "version": "0.8.19+commit.7dd6d404" }, - "language": "Solidity", - "output": { - "abi": [ - { - "inputs": [], - "stateMutability": "nonpayable", - "type": "constructor" - }, - { - "inputs": [], - "type": "event", - "name": "Trigger", - "anonymous": false - }, - { - "inputs": [ - { "internalType": "bytes32", 
"name": "", "type": "bytes32" } - ], - "stateMutability": "pure", - "type": "function", - "name": "exampleFunction", - "outputs": [ - { "internalType": "uint256", "name": "", "type": "uint256" } - ] - }, - { - "inputs": [ - { "internalType": "uint256", "name": "", "type": "uint256" } - ], - "stateMutability": "pure", - "type": "function", - "name": "exampleFunction", - "outputs": [ - { "internalType": "string", "name": "", "type": "string" } - ] - }, - { - "inputs": [ - { "internalType": "string", "name": "", "type": "string" } - ], - "stateMutability": "pure", - "type": "function", - "name": "exampleFunction", - "outputs": [ - { "internalType": "string", "name": "", "type": "string" } - ] - } - ], - "devdoc": { "kind": "dev", "methods": {}, "version": 1 }, - "userdoc": { "kind": "user", "methods": {}, "version": 1 } - }, - "settings": { - "remappings": [], - "optimizer": { "enabled": true, "runs": 200 }, - "metadata": { "bytecodeHash": "ipfs" }, - "compilationTarget": { - "src/OverloadedContract.sol": "OverloadedContract" - }, - "evmVersion": "paris", - "libraries": {} - }, - "sources": { - "src/OverloadedContract.sol": { - "keccak256": "0xc6734859398f3be8468d6e6c7fd8b03a52243223799ce17d5e4ab9d9aca1fc45", - "urls": [ - "bzz-raw://2c860b9cd7d0a2086e164ce38a2aa24a5b7f681bb575a5a656f732d3742761be", - "dweb:/ipfs/QmPwazDSTPrNpVrRY2vunso7VXunWp5dn1641TzxK9eZfe" - ], - "license": "MIT" - } - }, - "version": 1 - }, - "ast": { - "absolutePath": "src/OverloadedContract.sol", - "id": 73, - "exportedSymbols": { "OverloadedContract": [72] }, - "nodeType": "SourceUnit", - "src": "32:483:1", - "nodes": [ - { - "id": 32, - "nodeType": "PragmaDirective", - "src": "32:23:1", - "nodes": [], - "literals": ["solidity", "^", "0.8", ".0"] - }, - { - "id": 72, - "nodeType": "ContractDefinition", - "src": "57:457:1", - "nodes": [ - { - "id": 34, - "nodeType": "EventDefinition", - "src": "91:16:1", - "nodes": [], - "anonymous": false, - "eventSelector": 
"3d53a39550e04688065827f3bb86584cb007ab9ebca7ebd528e7301c9c31eb5d", - "name": "Trigger", - "nameLocation": "97:7:1", - "parameters": { - "id": 33, - "nodeType": "ParameterList", - "parameters": [], - "src": "104:2:1" - } - }, - { - "id": 41, - "nodeType": "FunctionDefinition", - "src": "113:45:1", - "nodes": [], - "body": { - "id": 40, - "nodeType": "Block", - "src": "127:31:1", - "nodes": [], - "statements": [ - { - "eventCall": { - "arguments": [], - "expression": { - "argumentTypes": [], - "id": 37, - "name": "Trigger", - "nodeType": "Identifier", - "overloadedDeclarations": [], - "referencedDeclaration": 34, - "src": "142:7:1", - "typeDescriptions": { - "typeIdentifier": "t_function_event_nonpayable$__$returns$__$", - "typeString": "function ()" - } - }, - "id": 38, - "isConstant": false, - "isLValue": false, - "isPure": false, - "kind": "functionCall", - "lValueRequested": false, - "nameLocations": [], - "names": [], - "nodeType": "FunctionCall", - "src": "142:9:1", - "tryCall": false, - "typeDescriptions": { - "typeIdentifier": "t_tuple$__$", - "typeString": "tuple()" - } - }, - "id": 39, - "nodeType": "EmitStatement", - "src": "137:14:1" - } - ] - }, - "implemented": true, - "kind": "constructor", - "modifiers": [], - "name": "", - "nameLocation": "-1:-1:-1", - "parameters": { - "id": 35, - "nodeType": "ParameterList", - "parameters": [], - "src": "124:2:1" - }, - "returnParameters": { - "id": 36, - "nodeType": "ParameterList", - "parameters": [], - "src": "127:0:1" - }, - "scope": 72, - "stateMutability": "nonpayable", - "virtual": false, - "visibility": "public" - }, - { - "id": 51, - "nodeType": "FunctionDefinition", - "src": "164:132:1", - "nodes": [], - "body": { - "id": 50, - "nodeType": "Block", - "src": "254:42:1", - "nodes": [], - "statements": [ - { - "expression": { - "hexValue": "737472696e67202d3e20737472696e67", - "id": 48, - "isConstant": false, - "isLValue": false, - "isPure": true, - "kind": "string", - "lValueRequested": false, - 
"nodeType": "Literal", - "src": "271:18:1", - "typeDescriptions": { - "typeIdentifier": "t_stringliteral_a675d5271e48bf44b2d3a2abcbe5392d4a4159912e3d2d332a49139a8b50d538", - "typeString": "literal_string \"string -> string\"" - }, - "value": "string -> string" - }, - "functionReturnParameters": 47, - "id": 49, - "nodeType": "Return", - "src": "264:25:1" - } - ] - }, - "functionSelector": "bc2d73ba", - "implemented": true, - "kind": "function", - "modifiers": [], - "name": "exampleFunction", - "nameLocation": "173:15:1", - "parameters": { - "id": 44, - "nodeType": "ParameterList", - "parameters": [ - { - "constant": false, - "id": 43, - "mutability": "mutable", - "name": "", - "nameLocation": "-1:-1:-1", - "nodeType": "VariableDeclaration", - "scope": 51, - "src": "198:13:1", - "stateVariable": false, - "storageLocation": "memory", - "typeDescriptions": { - "typeIdentifier": "t_string_memory_ptr", - "typeString": "string" - }, - "typeName": { - "id": 42, - "name": "string", - "nodeType": "ElementaryTypeName", - "src": "198:6:1", - "typeDescriptions": { - "typeIdentifier": "t_string_storage_ptr", - "typeString": "string" - } - }, - "visibility": "internal" - } - ], - "src": "188:29:1" - }, - "returnParameters": { - "id": 47, - "nodeType": "ParameterList", - "parameters": [ - { - "constant": false, - "id": 46, - "mutability": "mutable", - "name": "", - "nameLocation": "-1:-1:-1", - "nodeType": "VariableDeclaration", - "scope": 51, - "src": "239:13:1", - "stateVariable": false, - "storageLocation": "memory", - "typeDescriptions": { - "typeIdentifier": "t_string_memory_ptr", - "typeString": "string" - }, - "typeName": { - "id": 45, - "name": "string", - "nodeType": "ElementaryTypeName", - "src": "239:6:1", - "typeDescriptions": { - "typeIdentifier": "t_string_storage_ptr", - "typeString": "string" - } - }, - "visibility": "internal" - } - ], - "src": "238:15:1" - }, - "scope": 72, - "stateMutability": "pure", - "virtual": false, - "visibility": "public" - }, - { - "id": 
61, - "nodeType": "FunctionDefinition", - "src": "302:113:1", - "nodes": [], - "body": { - "id": 60, - "nodeType": "Block", - "src": "372:43:1", - "nodes": [], - "statements": [ - { - "expression": { - "hexValue": "75696e74323536202d3e20737472696e67", - "id": 58, - "isConstant": false, - "isLValue": false, - "isPure": true, - "kind": "string", - "lValueRequested": false, - "nodeType": "Literal", - "src": "389:19:1", - "typeDescriptions": { - "typeIdentifier": "t_stringliteral_56541f37aba8911ed7b3fc4c5c74297515444b42d7c1b74ff1c1abc66e2d65cd", - "typeString": "literal_string \"uint256 -> string\"" - }, - "value": "uint256 -> string" - }, - "functionReturnParameters": 57, - "id": 59, - "nodeType": "Return", - "src": "382:26:1" - } - ] - }, - "functionSelector": "934bc29d", - "implemented": true, - "kind": "function", - "modifiers": [], - "name": "exampleFunction", - "nameLocation": "311:15:1", - "parameters": { - "id": 54, - "nodeType": "ParameterList", - "parameters": [ - { - "constant": false, - "id": 53, - "mutability": "mutable", - "name": "", - "nameLocation": "-1:-1:-1", - "nodeType": "VariableDeclaration", - "scope": 61, - "src": "327:7:1", - "stateVariable": false, - "storageLocation": "default", - "typeDescriptions": { - "typeIdentifier": "t_uint256", - "typeString": "uint256" - }, - "typeName": { - "id": 52, - "name": "uint256", - "nodeType": "ElementaryTypeName", - "src": "327:7:1", - "typeDescriptions": { - "typeIdentifier": "t_uint256", - "typeString": "uint256" - } - }, - "visibility": "internal" - } - ], - "src": "326:9:1" - }, - "returnParameters": { - "id": 57, - "nodeType": "ParameterList", - "parameters": [ - { - "constant": false, - "id": 56, - "mutability": "mutable", - "name": "", - "nameLocation": "-1:-1:-1", - "nodeType": "VariableDeclaration", - "scope": 61, - "src": "357:13:1", - "stateVariable": false, - "storageLocation": "memory", - "typeDescriptions": { - "typeIdentifier": "t_string_memory_ptr", - "typeString": "string" - }, - "typeName": 
{ - "id": 55, - "name": "string", - "nodeType": "ElementaryTypeName", - "src": "357:6:1", - "typeDescriptions": { - "typeIdentifier": "t_string_storage_ptr", - "typeString": "string" - } - }, - "visibility": "internal" - } - ], - "src": "356:15:1" - }, - "scope": 72, - "stateMutability": "pure", - "virtual": false, - "visibility": "public" - }, - { - "id": 71, - "nodeType": "FunctionDefinition", - "src": "421:91:1", - "nodes": [], - "body": { - "id": 70, - "nodeType": "Block", - "src": "485:27:1", - "nodes": [], - "statements": [ - { - "expression": { - "hexValue": "323536", - "id": 68, - "isConstant": false, - "isLValue": false, - "isPure": true, - "kind": "number", - "lValueRequested": false, - "nodeType": "Literal", - "src": "502:3:1", - "typeDescriptions": { - "typeIdentifier": "t_rational_256_by_1", - "typeString": "int_const 256" - }, - "value": "256" - }, - "functionReturnParameters": 67, - "id": 69, - "nodeType": "Return", - "src": "495:10:1" - } - ] - }, - "functionSelector": "31870cbc", - "implemented": true, - "kind": "function", - "modifiers": [], - "name": "exampleFunction", - "nameLocation": "430:15:1", - "parameters": { - "id": 64, - "nodeType": "ParameterList", - "parameters": [ - { - "constant": false, - "id": 63, - "mutability": "mutable", - "name": "", - "nameLocation": "-1:-1:-1", - "nodeType": "VariableDeclaration", - "scope": 71, - "src": "446:7:1", - "stateVariable": false, - "storageLocation": "default", - "typeDescriptions": { - "typeIdentifier": "t_bytes32", - "typeString": "bytes32" - }, - "typeName": { - "id": 62, - "name": "bytes32", - "nodeType": "ElementaryTypeName", - "src": "446:7:1", - "typeDescriptions": { - "typeIdentifier": "t_bytes32", - "typeString": "bytes32" - } - }, - "visibility": "internal" - } - ], - "src": "445:9:1" - }, - "returnParameters": { - "id": 67, - "nodeType": "ParameterList", - "parameters": [ - { - "constant": false, - "id": 66, - "mutability": "mutable", - "name": "", - "nameLocation": "-1:-1:-1", - 
"nodeType": "VariableDeclaration", - "scope": 71, - "src": "476:7:1", - "stateVariable": false, - "storageLocation": "default", - "typeDescriptions": { - "typeIdentifier": "t_uint256", - "typeString": "uint256" - }, - "typeName": { - "id": 65, - "name": "uint256", - "nodeType": "ElementaryTypeName", - "src": "476:7:1", - "typeDescriptions": { - "typeIdentifier": "t_uint256", - "typeString": "uint256" - } - }, - "visibility": "internal" - } - ], - "src": "475:9:1" - }, - "scope": 72, - "stateMutability": "pure", - "virtual": false, - "visibility": "public" - } - ], - "abstract": false, - "baseContracts": [], - "canonicalName": "OverloadedContract", - "contractDependencies": [], - "contractKind": "contract", - "fullyImplemented": true, - "linearizedBaseContracts": [72], - "name": "OverloadedContract", - "nameLocation": "66:18:1", - "scope": 73, - "usedErrors": [] - } - ], - "license": "MIT" - }, - "id": 1 -} +{"abi":[{"type":"constructor","inputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"exampleFunction","inputs":[{"name":"","type":"bytes32","internalType":"bytes32"}],"outputs":[{"name":"","type":"uint256","internalType":"uint256"}],"stateMutability":"pure"},{"type":"function","name":"exampleFunction","inputs":[{"name":"","type":"uint256","internalType":"uint256"}],"outputs":[{"name":"","type":"string","internalType":"string"}],"stateMutability":"pure"},{"type":"function","name":"exampleFunction","inputs":[{"name":"","type":"string","internalType":"string"}],"outputs":[{"name":"","type":"string","internalType":"string"}],"stateMutability":"pure"},{"type":"event","name":"Trigger","inputs":[],"anonymous":false}],"bytecode":{"object":"0x608060405234801561001057600080fd5b506040517f3d53a39550e04688065827f3bb86584cb007ab9ebca7ebd528e7301c9c31eb5d90600090a1610252806100496000396000f3fe608060405234801561001057600080fd5b50600436106100415760003560e01c806331870cbc14610046578063934bc29d1461006e578063bc2d73ba146100b5575b600080fd5b61005b6100543660046100ee565b506
1010090565b6040519081526020015b60405180910390f35b6100a861007c3660046100ee565b5060408051808201909152601181527075696e74323536202d3e20737472696e6760781b602082015290565b6040516100659190610107565b6100a86100c336600461016b565b5060408051808201909152601081526f737472696e67202d3e20737472696e6760801b602082015290565b60006020828403121561010057600080fd5b5035919050565b600060208083528351808285015260005b8181101561013457858101830151858201604001528201610118565b506000604082860101526040601f19601f8301168501019250505092915050565b634e487b7160e01b600052604160045260246000fd5b60006020828403121561017d57600080fd5b813567ffffffffffffffff8082111561019557600080fd5b818401915084601f8301126101a957600080fd5b8135818111156101bb576101bb610155565b604051601f8201601f19908116603f011681019083821181831017156101e3576101e3610155565b816040528281528760208487010111156101fc57600080fd5b82602086016020830137600092810160200192909252509594505050505056fea2646970667358221220d7abec9e326f4c25cc8f45f8ee265c92b595b8cf7f1d5a1d863735dee11ed7d064736f6c63430008130033","sourceMap":"57:457:1:-:0;;;113:45;;;;;;;;;-1:-1:-1;142:9:1;;;;;;;57:457;;;;;;","linkReferences":{}},"deployedBytecode":{"object":"0x608060405234801561001057600080fd5b50600436106100415760003560e01c806331870cbc14610046578063934bc29d1461006e578063bc2d73ba146100b5575b600080fd5b61005b6100543660046100ee565b5061010090565b6040519081526020015b60405180910390f35b6100a861007c3660046100ee565b5060408051808201909152601181527075696e74323536202d3e20737472696e6760781b602082015290565b6040516100659190610107565b6100a86100c336600461016b565b5060408051808201909152601081526f737472696e67202d3e20737472696e6760801b602082015290565b60006020828403121561010057600080fd5b5035919050565b600060208083528351808285015260005b8181101561013457858101830151858201604001528201610118565b506000604082860101526040601f19601f8301168501019250505092915050565b634e487b7160e01b600052604160045260246000fd5b60006020828403121561017d57600080fd5b813567ffffffffffffffff8082111561019557600080fd5b818401915084601f8301126101a957600080fd
5b8135818111156101bb576101bb610155565b604051601f8201601f19908116603f011681019083821181831017156101e3576101e3610155565b816040528281528760208487010111156101fc57600080fd5b82602086016020830137600092810160200192909252509594505050505056fea2646970667358221220d7abec9e326f4c25cc8f45f8ee265c92b595b8cf7f1d5a1d863735dee11ed7d064736f6c63430008130033","sourceMap":"57:457:1:-:0;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;421:91;;;;;;:::i;:::-;-1:-1:-1;502:3:1;;421:91;;;;345:25:4;;;333:2;318:18;421:91:1;;;;;;;;302:113;;;;;;:::i;:::-;-1:-1:-1;382:26:1;;;;;;;;;;;;-1:-1:-1;;;382:26:1;;;;;302:113;;;;;;;;:::i;164:132::-;;;;;;:::i;:::-;-1:-1:-1;264:25:1;;;;;;;;;;;;-1:-1:-1;;;264:25:1;;;;;164:132;14:180:4;73:6;126:2;114:9;105:7;101:23;97:32;94:52;;;142:1;139;132:12;94:52;-1:-1:-1;165:23:4;;14:180;-1:-1:-1;14:180:4:o;566:548::-;678:4;707:2;736;725:9;718:21;768:6;762:13;811:6;806:2;795:9;791:18;784:34;836:1;846:140;860:6;857:1;854:13;846:140;;;955:14;;;951:23;;945:30;921:17;;;940:2;917:26;910:66;875:10;;846:140;;;850:3;1035:1;1030:2;1021:6;1010:9;1006:22;1002:31;995:42;1105:2;1098;1094:7;1089:2;1081:6;1077:15;1073:29;1062:9;1058:45;1054:54;1046:62;;;;566:548;;;;:::o;1119:127::-;1180:10;1175:3;1171:20;1168:1;1161:31;1211:4;1208:1;1201:15;1235:4;1232:1;1225:15;1251:922;1320:6;1373:2;1361:9;1352:7;1348:23;1344:32;1341:52;;;1389:1;1386;1379:12;1341:52;1429:9;1416:23;1458:18;1499:2;1491:6;1488:14;1485:34;;;1515:1;1512;1505:12;1485:34;1553:6;1542:9;1538:22;1528:32;;1598:7;1591:4;1587:2;1583:13;1579:27;1569:55;;1620:1;1617;1610:12;1569:55;1656:2;1643:16;1678:2;1674;1671:10;1668:36;;;1684:18;;:::i;:::-;1759:2;1753:9;1727:2;1813:13;;-1:-1:-1;;1809:22:4;;;1833:2;1805:31;1801:40;1789:53;;;1857:18;;;1877:22;;;1854:46;1851:72;;;1903:18;;:::i;:::-;1943:10;1939:2;1932:22;1978:2;1970:6;1963:18;2018:7;2013:2;2008;2004;2000:11;1996:20;1993:33;1990:53;;;2039:1;2036;2029:12;1990:53;2095:2;2090;2086;2082:11;2077:2;2069:6;2065:15;2052:46;2140:1;2118:15;;;2135:2;2114:24;2107:35;;;;-1:-1:-1;2122:6:4;1251:922;-1:-1:-1;
;;;;1251:922:4:o","linkReferences":{}},"methodIdentifiers":{"exampleFunction(bytes32)":"31870cbc","exampleFunction(string)":"bc2d73ba","exampleFunction(uint256)":"934bc29d"},"rawMetadata":"{\"compiler\":{\"version\":\"0.8.19+commit.7dd6d404\"},\"language\":\"Solidity\",\"output\":{\"abi\":[{\"inputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[],\"name\":\"Trigger\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"name\":\"exampleFunction\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"exampleFunction\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"name\":\"exampleFunction\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"pure\",\"type\":\"function\"}],\"devdoc\":{\"kind\":\"dev\",\"methods\":{},\"version\":1},\"userdoc\":{\"kind\":\"user\",\"methods\":{},\"version\":1}},\"settings\":{\"compilationTarget\":{\"src/OverloadedContract.sol\":\"OverloadedContract\"},\"evmVersion\":\"paris\",\"libraries\":{},\"metadata\":{\"bytecodeHash\":\"ipfs\"},\"optimizer\":{\"enabled\":true,\"runs\":200},\"remappings\":[]},\"sources\":{\"src/OverloadedContract.sol\":{\"keccak256\":\"0xc6734859398f3be8468d6e6c7fd8b03a52243223799ce17d5e4ab9d9aca1fc45\",\"license\":\"MIT\",\"urls\":[\"bzz-raw://2c860b9cd7d0a2086e164ce38a2aa24a5b7f681bb575a5a656f732d3742761be\",\"dweb:/ipfs/QmPwazDSTPrNpVrRY2vunso7VXunWp5dn1641TzxK9eZfe\"]}},\"version\":1}","metadata":{"compiler":{"version":"0.8.19+commit.7dd6d404"},"language":"Solidity","output":{"abi":[{"inputs":[],"stateMutability":"nonpayable","type":"constru
ctor"},{"inputs":[],"type":"event","name":"Trigger","anonymous":false},{"inputs":[{"internalType":"bytes32","name":"","type":"bytes32"}],"stateMutability":"pure","type":"function","name":"exampleFunction","outputs":[{"internalType":"uint256","name":"","type":"uint256"}]},{"inputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"pure","type":"function","name":"exampleFunction","outputs":[{"internalType":"string","name":"","type":"string"}]},{"inputs":[{"internalType":"string","name":"","type":"string"}],"stateMutability":"pure","type":"function","name":"exampleFunction","outputs":[{"internalType":"string","name":"","type":"string"}]}],"devdoc":{"kind":"dev","methods":{},"version":1},"userdoc":{"kind":"user","methods":{},"version":1}},"settings":{"remappings":[],"optimizer":{"enabled":true,"runs":200},"metadata":{"bytecodeHash":"ipfs"},"compilationTarget":{"src/OverloadedContract.sol":"OverloadedContract"},"evmVersion":"paris","libraries":{}},"sources":{"src/OverloadedContract.sol":{"keccak256":"0xc6734859398f3be8468d6e6c7fd8b03a52243223799ce17d5e4ab9d9aca1fc45","urls":["bzz-raw://2c860b9cd7d0a2086e164ce38a2aa24a5b7f681bb575a5a656f732d3742761be","dweb:/ipfs/QmPwazDSTPrNpVrRY2vunso7VXunWp5dn1641TzxK9eZfe"],"license":"MIT"}},"version":1},"ast":{"absolutePath":"src/OverloadedContract.sol","id":73,"exportedSymbols":{"OverloadedContract":[72]},"nodeType":"SourceUnit","src":"32:483:1","nodes":[{"id":32,"nodeType":"PragmaDirective","src":"32:23:1","nodes":[],"literals":["solidity","^","0.8",".0"]},{"id":72,"nodeType":"ContractDefinition","src":"57:457:1","nodes":[{"id":34,"nodeType":"EventDefinition","src":"91:16:1","nodes":[],"anonymous":false,"eventSelector":"3d53a39550e04688065827f3bb86584cb007ab9ebca7ebd528e7301c9c31eb5d","name":"Trigger","nameLocation":"97:7:1","parameters":{"id":33,"nodeType":"ParameterList","parameters":[],"src":"104:2:1"}},{"id":41,"nodeType":"FunctionDefinition","src":"113:45:1","nodes":[],"body":{"id":40,"nodeType":"Block
","src":"127:31:1","nodes":[],"statements":[{"eventCall":{"arguments":[],"expression":{"argumentTypes":[],"id":37,"name":"Trigger","nodeType":"Identifier","overloadedDeclarations":[],"referencedDeclaration":34,"src":"142:7:1","typeDescriptions":{"typeIdentifier":"t_function_event_nonpayable$__$returns$__$","typeString":"function ()"}},"id":38,"isConstant":false,"isLValue":false,"isPure":false,"kind":"functionCall","lValueRequested":false,"nameLocations":[],"names":[],"nodeType":"FunctionCall","src":"142:9:1","tryCall":false,"typeDescriptions":{"typeIdentifier":"t_tuple$__$","typeString":"tuple()"}},"id":39,"nodeType":"EmitStatement","src":"137:14:1"}]},"implemented":true,"kind":"constructor","modifiers":[],"name":"","nameLocation":"-1:-1:-1","parameters":{"id":35,"nodeType":"ParameterList","parameters":[],"src":"124:2:1"},"returnParameters":{"id":36,"nodeType":"ParameterList","parameters":[],"src":"127:0:1"},"scope":72,"stateMutability":"nonpayable","virtual":false,"visibility":"public"},{"id":51,"nodeType":"FunctionDefinition","src":"164:132:1","nodes":[],"body":{"id":50,"nodeType":"Block","src":"254:42:1","nodes":[],"statements":[{"expression":{"hexValue":"737472696e67202d3e20737472696e67","id":48,"isConstant":false,"isLValue":false,"isPure":true,"kind":"string","lValueRequested":false,"nodeType":"Literal","src":"271:18:1","typeDescriptions":{"typeIdentifier":"t_stringliteral_a675d5271e48bf44b2d3a2abcbe5392d4a4159912e3d2d332a49139a8b50d538","typeString":"literal_string \"string -> string\""},"value":"string -> 
string"},"functionReturnParameters":47,"id":49,"nodeType":"Return","src":"264:25:1"}]},"functionSelector":"bc2d73ba","implemented":true,"kind":"function","modifiers":[],"name":"exampleFunction","nameLocation":"173:15:1","parameters":{"id":44,"nodeType":"ParameterList","parameters":[{"constant":false,"id":43,"mutability":"mutable","name":"","nameLocation":"-1:-1:-1","nodeType":"VariableDeclaration","scope":51,"src":"198:13:1","stateVariable":false,"storageLocation":"memory","typeDescriptions":{"typeIdentifier":"t_string_memory_ptr","typeString":"string"},"typeName":{"id":42,"name":"string","nodeType":"ElementaryTypeName","src":"198:6:1","typeDescriptions":{"typeIdentifier":"t_string_storage_ptr","typeString":"string"}},"visibility":"internal"}],"src":"188:29:1"},"returnParameters":{"id":47,"nodeType":"ParameterList","parameters":[{"constant":false,"id":46,"mutability":"mutable","name":"","nameLocation":"-1:-1:-1","nodeType":"VariableDeclaration","scope":51,"src":"239:13:1","stateVariable":false,"storageLocation":"memory","typeDescriptions":{"typeIdentifier":"t_string_memory_ptr","typeString":"string"},"typeName":{"id":45,"name":"string","nodeType":"ElementaryTypeName","src":"239:6:1","typeDescriptions":{"typeIdentifier":"t_string_storage_ptr","typeString":"string"}},"visibility":"internal"}],"src":"238:15:1"},"scope":72,"stateMutability":"pure","virtual":false,"visibility":"public"},{"id":61,"nodeType":"FunctionDefinition","src":"302:113:1","nodes":[],"body":{"id":60,"nodeType":"Block","src":"372:43:1","nodes":[],"statements":[{"expression":{"hexValue":"75696e74323536202d3e20737472696e67","id":58,"isConstant":false,"isLValue":false,"isPure":true,"kind":"string","lValueRequested":false,"nodeType":"Literal","src":"389:19:1","typeDescriptions":{"typeIdentifier":"t_stringliteral_56541f37aba8911ed7b3fc4c5c74297515444b42d7c1b74ff1c1abc66e2d65cd","typeString":"literal_string \"uint256 -> string\""},"value":"uint256 -> 
string"},"functionReturnParameters":57,"id":59,"nodeType":"Return","src":"382:26:1"}]},"functionSelector":"934bc29d","implemented":true,"kind":"function","modifiers":[],"name":"exampleFunction","nameLocation":"311:15:1","parameters":{"id":54,"nodeType":"ParameterList","parameters":[{"constant":false,"id":53,"mutability":"mutable","name":"","nameLocation":"-1:-1:-1","nodeType":"VariableDeclaration","scope":61,"src":"327:7:1","stateVariable":false,"storageLocation":"default","typeDescriptions":{"typeIdentifier":"t_uint256","typeString":"uint256"},"typeName":{"id":52,"name":"uint256","nodeType":"ElementaryTypeName","src":"327:7:1","typeDescriptions":{"typeIdentifier":"t_uint256","typeString":"uint256"}},"visibility":"internal"}],"src":"326:9:1"},"returnParameters":{"id":57,"nodeType":"ParameterList","parameters":[{"constant":false,"id":56,"mutability":"mutable","name":"","nameLocation":"-1:-1:-1","nodeType":"VariableDeclaration","scope":61,"src":"357:13:1","stateVariable":false,"storageLocation":"memory","typeDescriptions":{"typeIdentifier":"t_string_memory_ptr","typeString":"string"},"typeName":{"id":55,"name":"string","nodeType":"ElementaryTypeName","src":"357:6:1","typeDescriptions":{"typeIdentifier":"t_string_storage_ptr","typeString":"string"}},"visibility":"internal"}],"src":"356:15:1"},"scope":72,"stateMutability":"pure","virtual":false,"visibility":"public"},{"id":71,"nodeType":"FunctionDefinition","src":"421:91:1","nodes":[],"body":{"id":70,"nodeType":"Block","src":"485:27:1","nodes":[],"statements":[{"expression":{"hexValue":"323536","id":68,"isConstant":false,"isLValue":false,"isPure":true,"kind":"number","lValueRequested":false,"nodeType":"Literal","src":"502:3:1","typeDescriptions":{"typeIdentifier":"t_rational_256_by_1","typeString":"int_const 
256"},"value":"256"},"functionReturnParameters":67,"id":69,"nodeType":"Return","src":"495:10:1"}]},"functionSelector":"31870cbc","implemented":true,"kind":"function","modifiers":[],"name":"exampleFunction","nameLocation":"430:15:1","parameters":{"id":64,"nodeType":"ParameterList","parameters":[{"constant":false,"id":63,"mutability":"mutable","name":"","nameLocation":"-1:-1:-1","nodeType":"VariableDeclaration","scope":71,"src":"446:7:1","stateVariable":false,"storageLocation":"default","typeDescriptions":{"typeIdentifier":"t_bytes32","typeString":"bytes32"},"typeName":{"id":62,"name":"bytes32","nodeType":"ElementaryTypeName","src":"446:7:1","typeDescriptions":{"typeIdentifier":"t_bytes32","typeString":"bytes32"}},"visibility":"internal"}],"src":"445:9:1"},"returnParameters":{"id":67,"nodeType":"ParameterList","parameters":[{"constant":false,"id":66,"mutability":"mutable","name":"","nameLocation":"-1:-1:-1","nodeType":"VariableDeclaration","scope":71,"src":"476:7:1","stateVariable":false,"storageLocation":"default","typeDescriptions":{"typeIdentifier":"t_uint256","typeString":"uint256"},"typeName":{"id":65,"name":"uint256","nodeType":"ElementaryTypeName","src":"476:7:1","typeDescriptions":{"typeIdentifier":"t_uint256","typeString":"uint256"}},"visibility":"internal"}],"src":"475:9:1"},"scope":72,"stateMutability":"pure","virtual":false,"visibility":"public"}],"abstract":false,"baseContracts":[],"canonicalName":"OverloadedContract","contractDependencies":[],"contractKind":"contract","fullyImplemented":true,"linearizedBaseContracts":[72],"name":"OverloadedContract","nameLocation":"66:18:1","scope":73,"usedErrors":[]}],"license":"MIT"},"id":1} \ No newline at end of file diff --git a/tests/contracts/out/RevertingContract.sol/RevertingContract.json b/tests/contracts/out/RevertingContract.sol/RevertingContract.json index e925485a006..4c447a28729 100644 --- a/tests/contracts/out/RevertingContract.sol/RevertingContract.json +++ 
b/tests/contracts/out/RevertingContract.sol/RevertingContract.json @@ -1,450 +1 @@ -{ - "abi": [ - { "type": "constructor", "inputs": [], "stateMutability": "nonpayable" }, - { - "type": "function", - "name": "inc", - "inputs": [ - { "name": "value", "type": "uint256", "internalType": "uint256" } - ], - "outputs": [{ "name": "", "type": "uint256", "internalType": "uint256" }], - "stateMutability": "pure" - }, - { "type": "event", "name": "Trigger", "inputs": [], "anonymous": false } - ], - "bytecode": { - "object": "0x608060405234801561001057600080fd5b506040517f3d53a39550e04688065827f3bb86584cb007ab9ebca7ebd528e7301c9c31eb5d90600090a1610120806100496000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c8063812600df14602d575b600080fd5b603c603836600460b2565b604e565b60405190815260200160405180910390f35b6000600a821060a35760405162461bcd60e51b815260206004820152601b60248201527f63616e206f6e6c792068616e646c652076616c756573203c2031300000000000604482015260640160405180910390fd5b60ac82600160ca565b92915050565b60006020828403121560c357600080fd5b5035919050565b8082018082111560ac57634e487b7160e01b600052601160045260246000fdfea2646970667358221220ad875f460a402063be4ff63412a90d65fa24398c907d52e2a0926375442cb6f064736f6c63430008130033", - "sourceMap": "57:259:2:-:0;;;112:45;;;;;;;;;-1:-1:-1;141:9:2;;;;;;;57:259;;;;;;", - "linkReferences": {} - }, - "deployedBytecode": { - "object": "0x6080604052348015600f57600080fd5b506004361060285760003560e01c8063812600df14602d575b600080fd5b603c603836600460b2565b604e565b60405190815260200160405180910390f35b6000600a821060a35760405162461bcd60e51b815260206004820152601b60248201527f63616e206f6e6c792068616e646c652076616c756573203c2031300000000000604482015260640160405180910390fd5b60ac82600160ca565b92915050565b60006020828403121560c357600080fd5b5035919050565b8082018082111560ac57634e487b7160e01b600052601160045260246000fdfea2646970667358221220ad875f460a402063be4ff63412a90d65fa24398c907d52e2a0926375442cb6f064736f6c63430008130033", - "sourceMap": 
"57:259:2:-:0;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;163:151;;;;;;:::i;:::-;;:::i;:::-;;;345:25:4;;;333:2;318:18;163:151:2;;;;;;;;212:7;247:2;239:5;:10;231:50;;;;-1:-1:-1;;;231:50:2;;583:2:4;231:50:2;;;565:21:4;622:2;602:18;;;595:30;661:29;641:18;;;634:57;708:18;;231:50:2;;;;;;;;298:9;:5;306:1;298:9;:::i;:::-;291:16;163:151;-1:-1:-1;;163:151:2:o;14:180:4:-;73:6;126:2;114:9;105:7;101:23;97:32;94:52;;;142:1;139;132:12;94:52;-1:-1:-1;165:23:4;;14:180;-1:-1:-1;14:180:4:o;737:222::-;802:9;;;823:10;;;820:133;;;875:10;870:3;866:20;863:1;856:31;910:4;907:1;900:15;938:4;935:1;928:15", - "linkReferences": {} - }, - "methodIdentifiers": { "inc(uint256)": "812600df" }, - "rawMetadata": "{\"compiler\":{\"version\":\"0.8.19+commit.7dd6d404\"},\"language\":\"Solidity\",\"output\":{\"abi\":[{\"inputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[],\"name\":\"Trigger\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"value\",\"type\":\"uint256\"}],\"name\":\"inc\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"pure\",\"type\":\"function\"}],\"devdoc\":{\"kind\":\"dev\",\"methods\":{},\"version\":1},\"userdoc\":{\"kind\":\"user\",\"methods\":{},\"version\":1}},\"settings\":{\"compilationTarget\":{\"src/RevertingContract.sol\":\"RevertingContract\"},\"evmVersion\":\"paris\",\"libraries\":{},\"metadata\":{\"bytecodeHash\":\"ipfs\"},\"optimizer\":{\"enabled\":true,\"runs\":200},\"remappings\":[]},\"sources\":{\"src/RevertingContract.sol\":{\"keccak256\":\"0xb0ccab460539f08d5f40044fee3e45c26590431d6d08734acde070ca01d84e23\",\"license\":\"MIT\",\"urls\":[\"bzz-raw://3cece4cf2b0d867fb8ef474375f8907df5412056773e20e804e12061d98d057b\",\"dweb:/ipfs/QmeLfvzWjkpA6mCt1FJyNvgKeugzJJTRSBdyDUSBCovyrb\"]}},\"version\":1}", - "metadata": { - "compiler": { "version": "0.8.19+commit.7dd6d404" }, - "language": "Solidity", - "output": { - "abi": [ - { - "inputs": [], - 
"stateMutability": "nonpayable", - "type": "constructor" - }, - { - "inputs": [], - "type": "event", - "name": "Trigger", - "anonymous": false - }, - { - "inputs": [ - { "internalType": "uint256", "name": "value", "type": "uint256" } - ], - "stateMutability": "pure", - "type": "function", - "name": "inc", - "outputs": [ - { "internalType": "uint256", "name": "", "type": "uint256" } - ] - } - ], - "devdoc": { "kind": "dev", "methods": {}, "version": 1 }, - "userdoc": { "kind": "user", "methods": {}, "version": 1 } - }, - "settings": { - "remappings": [], - "optimizer": { "enabled": true, "runs": 200 }, - "metadata": { "bytecodeHash": "ipfs" }, - "compilationTarget": { "src/RevertingContract.sol": "RevertingContract" }, - "evmVersion": "paris", - "libraries": {} - }, - "sources": { - "src/RevertingContract.sol": { - "keccak256": "0xb0ccab460539f08d5f40044fee3e45c26590431d6d08734acde070ca01d84e23", - "urls": [ - "bzz-raw://3cece4cf2b0d867fb8ef474375f8907df5412056773e20e804e12061d98d057b", - "dweb:/ipfs/QmeLfvzWjkpA6mCt1FJyNvgKeugzJJTRSBdyDUSBCovyrb" - ], - "license": "MIT" - } - }, - "version": 1 - }, - "ast": { - "absolutePath": "src/RevertingContract.sol", - "id": 104, - "exportedSymbols": { "RevertingContract": [103] }, - "nodeType": "SourceUnit", - "src": "32:285:2", - "nodes": [ - { - "id": 74, - "nodeType": "PragmaDirective", - "src": "32:23:2", - "nodes": [], - "literals": ["solidity", "^", "0.8", ".0"] - }, - { - "id": 103, - "nodeType": "ContractDefinition", - "src": "57:259:2", - "nodes": [ - { - "id": 76, - "nodeType": "EventDefinition", - "src": "90:16:2", - "nodes": [], - "anonymous": false, - "eventSelector": "3d53a39550e04688065827f3bb86584cb007ab9ebca7ebd528e7301c9c31eb5d", - "name": "Trigger", - "nameLocation": "96:7:2", - "parameters": { - "id": 75, - "nodeType": "ParameterList", - "parameters": [], - "src": "103:2:2" - } - }, - { - "id": 83, - "nodeType": "FunctionDefinition", - "src": "112:45:2", - "nodes": [], - "body": { - "id": 82, - "nodeType": 
"Block", - "src": "126:31:2", - "nodes": [], - "statements": [ - { - "eventCall": { - "arguments": [], - "expression": { - "argumentTypes": [], - "id": 79, - "name": "Trigger", - "nodeType": "Identifier", - "overloadedDeclarations": [], - "referencedDeclaration": 76, - "src": "141:7:2", - "typeDescriptions": { - "typeIdentifier": "t_function_event_nonpayable$__$returns$__$", - "typeString": "function ()" - } - }, - "id": 80, - "isConstant": false, - "isLValue": false, - "isPure": false, - "kind": "functionCall", - "lValueRequested": false, - "nameLocations": [], - "names": [], - "nodeType": "FunctionCall", - "src": "141:9:2", - "tryCall": false, - "typeDescriptions": { - "typeIdentifier": "t_tuple$__$", - "typeString": "tuple()" - } - }, - "id": 81, - "nodeType": "EmitStatement", - "src": "136:14:2" - } - ] - }, - "implemented": true, - "kind": "constructor", - "modifiers": [], - "name": "", - "nameLocation": "-1:-1:-1", - "parameters": { - "id": 77, - "nodeType": "ParameterList", - "parameters": [], - "src": "123:2:2" - }, - "returnParameters": { - "id": 78, - "nodeType": "ParameterList", - "parameters": [], - "src": "126:0:2" - }, - "scope": 103, - "stateMutability": "nonpayable", - "virtual": false, - "visibility": "public" - }, - { - "id": 102, - "nodeType": "FunctionDefinition", - "src": "163:151:2", - "nodes": [], - "body": { - "id": 101, - "nodeType": "Block", - "src": "221:93:2", - "nodes": [], - "statements": [ - { - "expression": { - "arguments": [ - { - "commonType": { - "typeIdentifier": "t_uint256", - "typeString": "uint256" - }, - "id": 93, - "isConstant": false, - "isLValue": false, - "isPure": false, - "lValueRequested": false, - "leftExpression": { - "id": 91, - "name": "value", - "nodeType": "Identifier", - "overloadedDeclarations": [], - "referencedDeclaration": 85, - "src": "239:5:2", - "typeDescriptions": { - "typeIdentifier": "t_uint256", - "typeString": "uint256" - } - }, - "nodeType": "BinaryOperation", - "operator": "<", - 
"rightExpression": { - "hexValue": "3130", - "id": 92, - "isConstant": false, - "isLValue": false, - "isPure": true, - "kind": "number", - "lValueRequested": false, - "nodeType": "Literal", - "src": "247:2:2", - "typeDescriptions": { - "typeIdentifier": "t_rational_10_by_1", - "typeString": "int_const 10" - }, - "value": "10" - }, - "src": "239:10:2", - "typeDescriptions": { - "typeIdentifier": "t_bool", - "typeString": "bool" - } - }, - { - "hexValue": "63616e206f6e6c792068616e646c652076616c756573203c203130", - "id": 94, - "isConstant": false, - "isLValue": false, - "isPure": true, - "kind": "string", - "lValueRequested": false, - "nodeType": "Literal", - "src": "251:29:2", - "typeDescriptions": { - "typeIdentifier": "t_stringliteral_578cd1fc098748633f5d7d46bba428bb3129c1e63324f2b7151699cae5146449", - "typeString": "literal_string \"can only handle values < 10\"" - }, - "value": "can only handle values < 10" - } - ], - "expression": { - "argumentTypes": [ - { "typeIdentifier": "t_bool", "typeString": "bool" }, - { - "typeIdentifier": "t_stringliteral_578cd1fc098748633f5d7d46bba428bb3129c1e63324f2b7151699cae5146449", - "typeString": "literal_string \"can only handle values < 10\"" - } - ], - "id": 90, - "name": "require", - "nodeType": "Identifier", - "overloadedDeclarations": [-18, -18], - "referencedDeclaration": -18, - "src": "231:7:2", - "typeDescriptions": { - "typeIdentifier": "t_function_require_pure$_t_bool_$_t_string_memory_ptr_$returns$__$", - "typeString": "function (bool,string memory) pure" - } - }, - "id": 95, - "isConstant": false, - "isLValue": false, - "isPure": false, - "kind": "functionCall", - "lValueRequested": false, - "nameLocations": [], - "names": [], - "nodeType": "FunctionCall", - "src": "231:50:2", - "tryCall": false, - "typeDescriptions": { - "typeIdentifier": "t_tuple$__$", - "typeString": "tuple()" - } - }, - "id": 96, - "nodeType": "ExpressionStatement", - "src": "231:50:2" - }, - { - "expression": { - "commonType": { - 
"typeIdentifier": "t_uint256", - "typeString": "uint256" - }, - "id": 99, - "isConstant": false, - "isLValue": false, - "isPure": false, - "lValueRequested": false, - "leftExpression": { - "id": 97, - "name": "value", - "nodeType": "Identifier", - "overloadedDeclarations": [], - "referencedDeclaration": 85, - "src": "298:5:2", - "typeDescriptions": { - "typeIdentifier": "t_uint256", - "typeString": "uint256" - } - }, - "nodeType": "BinaryOperation", - "operator": "+", - "rightExpression": { - "hexValue": "31", - "id": 98, - "isConstant": false, - "isLValue": false, - "isPure": true, - "kind": "number", - "lValueRequested": false, - "nodeType": "Literal", - "src": "306:1:2", - "typeDescriptions": { - "typeIdentifier": "t_rational_1_by_1", - "typeString": "int_const 1" - }, - "value": "1" - }, - "src": "298:9:2", - "typeDescriptions": { - "typeIdentifier": "t_uint256", - "typeString": "uint256" - } - }, - "functionReturnParameters": 89, - "id": 100, - "nodeType": "Return", - "src": "291:16:2" - } - ] - }, - "functionSelector": "812600df", - "implemented": true, - "kind": "function", - "modifiers": [], - "name": "inc", - "nameLocation": "172:3:2", - "parameters": { - "id": 86, - "nodeType": "ParameterList", - "parameters": [ - { - "constant": false, - "id": 85, - "mutability": "mutable", - "name": "value", - "nameLocation": "184:5:2", - "nodeType": "VariableDeclaration", - "scope": 102, - "src": "176:13:2", - "stateVariable": false, - "storageLocation": "default", - "typeDescriptions": { - "typeIdentifier": "t_uint256", - "typeString": "uint256" - }, - "typeName": { - "id": 84, - "name": "uint256", - "nodeType": "ElementaryTypeName", - "src": "176:7:2", - "typeDescriptions": { - "typeIdentifier": "t_uint256", - "typeString": "uint256" - } - }, - "visibility": "internal" - } - ], - "src": "175:15:2" - }, - "returnParameters": { - "id": 89, - "nodeType": "ParameterList", - "parameters": [ - { - "constant": false, - "id": 88, - "mutability": "mutable", - "name": "", - 
"nameLocation": "-1:-1:-1", - "nodeType": "VariableDeclaration", - "scope": 102, - "src": "212:7:2", - "stateVariable": false, - "storageLocation": "default", - "typeDescriptions": { - "typeIdentifier": "t_uint256", - "typeString": "uint256" - }, - "typeName": { - "id": 87, - "name": "uint256", - "nodeType": "ElementaryTypeName", - "src": "212:7:2", - "typeDescriptions": { - "typeIdentifier": "t_uint256", - "typeString": "uint256" - } - }, - "visibility": "internal" - } - ], - "src": "211:9:2" - }, - "scope": 103, - "stateMutability": "pure", - "virtual": false, - "visibility": "public" - } - ], - "abstract": false, - "baseContracts": [], - "canonicalName": "RevertingContract", - "contractDependencies": [], - "contractKind": "contract", - "fullyImplemented": true, - "linearizedBaseContracts": [103], - "name": "RevertingContract", - "nameLocation": "66:17:2", - "scope": 104, - "usedErrors": [] - } - ], - "license": "MIT" - }, - "id": 2 -} +{"abi":[{"type":"constructor","inputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"inc","inputs":[{"name":"value","type":"uint256","internalType":"uint256"}],"outputs":[{"name":"","type":"uint256","internalType":"uint256"}],"stateMutability":"pure"},{"type":"event","name":"Trigger","inputs":[],"anonymous":false}],"bytecode":{"object":"0x608060405234801561001057600080fd5b506040517f3d53a39550e04688065827f3bb86584cb007ab9ebca7ebd528e7301c9c31eb5d90600090a1610120806100496000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c8063812600df14602d575b600080fd5b603c603836600460b2565b604e565b60405190815260200160405180910390f35b6000600a821060a35760405162461bcd60e51b815260206004820152601b60248201527f63616e206f6e6c792068616e646c652076616c756573203c2031300000000000604482015260640160405180910390fd5b60ac82600160ca565b92915050565b60006020828403121560c357600080fd5b5035919050565b8082018082111560ac57634e487b7160e01b600052601160045260246000fdfea2646970667358221220ad875f460a402063be4ff63412a90d65fa24398c907d52e2a09263
75442cb6f064736f6c63430008130033","sourceMap":"57:259:2:-:0;;;112:45;;;;;;;;;-1:-1:-1;141:9:2;;;;;;;57:259;;;;;;","linkReferences":{}},"deployedBytecode":{"object":"0x6080604052348015600f57600080fd5b506004361060285760003560e01c8063812600df14602d575b600080fd5b603c603836600460b2565b604e565b60405190815260200160405180910390f35b6000600a821060a35760405162461bcd60e51b815260206004820152601b60248201527f63616e206f6e6c792068616e646c652076616c756573203c2031300000000000604482015260640160405180910390fd5b60ac82600160ca565b92915050565b60006020828403121560c357600080fd5b5035919050565b8082018082111560ac57634e487b7160e01b600052601160045260246000fdfea2646970667358221220ad875f460a402063be4ff63412a90d65fa24398c907d52e2a0926375442cb6f064736f6c63430008130033","sourceMap":"57:259:2:-:0;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;163:151;;;;;;:::i;:::-;;:::i;:::-;;;345:25:4;;;333:2;318:18;163:151:2;;;;;;;;212:7;247:2;239:5;:10;231:50;;;;-1:-1:-1;;;231:50:2;;583:2:4;231:50:2;;;565:21:4;622:2;602:18;;;595:30;661:29;641:18;;;634:57;708:18;;231:50:2;;;;;;;;298:9;:5;306:1;298:9;:::i;:::-;291:16;163:151;-1:-1:-1;;163:151:2:o;14:180:4:-;73:6;126:2;114:9;105:7;101:23;97:32;94:52;;;142:1;139;132:12;94:52;-1:-1:-1;165:23:4;;14:180;-1:-1:-1;14:180:4:o;737:222::-;802:9;;;823:10;;;820:133;;;875:10;870:3;866:20;863:1;856:31;910:4;907:1;900:15;938:4;935:1;928:15","linkReferences":{}},"methodIdentifiers":{"inc(uint256)":"812600df"},"rawMetadata":"{\"compiler\":{\"version\":\"0.8.19+commit.7dd6d404\"},\"language\":\"Solidity\",\"output\":{\"abi\":[{\"inputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[],\"name\":\"Trigger\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"value\",\"type\":\"uint256\"}],\"name\":\"inc\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"pure\",\"type\":\"function\"}],\"devdoc\":{\"kind\":\"dev\",\"methods\":{},\"version\":1},\"userdoc\":{\"kind\":\"user\",\"metho
ds\":{},\"version\":1}},\"settings\":{\"compilationTarget\":{\"src/RevertingContract.sol\":\"RevertingContract\"},\"evmVersion\":\"paris\",\"libraries\":{},\"metadata\":{\"bytecodeHash\":\"ipfs\"},\"optimizer\":{\"enabled\":true,\"runs\":200},\"remappings\":[]},\"sources\":{\"src/RevertingContract.sol\":{\"keccak256\":\"0xb0ccab460539f08d5f40044fee3e45c26590431d6d08734acde070ca01d84e23\",\"license\":\"MIT\",\"urls\":[\"bzz-raw://3cece4cf2b0d867fb8ef474375f8907df5412056773e20e804e12061d98d057b\",\"dweb:/ipfs/QmeLfvzWjkpA6mCt1FJyNvgKeugzJJTRSBdyDUSBCovyrb\"]}},\"version\":1}","metadata":{"compiler":{"version":"0.8.19+commit.7dd6d404"},"language":"Solidity","output":{"abi":[{"inputs":[],"stateMutability":"nonpayable","type":"constructor"},{"inputs":[],"type":"event","name":"Trigger","anonymous":false},{"inputs":[{"internalType":"uint256","name":"value","type":"uint256"}],"stateMutability":"pure","type":"function","name":"inc","outputs":[{"internalType":"uint256","name":"","type":"uint256"}]}],"devdoc":{"kind":"dev","methods":{},"version":1},"userdoc":{"kind":"user","methods":{},"version":1}},"settings":{"remappings":[],"optimizer":{"enabled":true,"runs":200},"metadata":{"bytecodeHash":"ipfs"},"compilationTarget":{"src/RevertingContract.sol":"RevertingContract"},"evmVersion":"paris","libraries":{}},"sources":{"src/RevertingContract.sol":{"keccak256":"0xb0ccab460539f08d5f40044fee3e45c26590431d6d08734acde070ca01d84e23","urls":["bzz-raw://3cece4cf2b0d867fb8ef474375f8907df5412056773e20e804e12061d98d057b","dweb:/ipfs/QmeLfvzWjkpA6mCt1FJyNvgKeugzJJTRSBdyDUSBCovyrb"],"license":"MIT"}},"version":1},"ast":{"absolutePath":"src/RevertingContract.sol","id":104,"exportedSymbols":{"RevertingContract":[103]},"nodeType":"SourceUnit","src":"32:285:2","nodes":[{"id":74,"nodeType":"PragmaDirective","src":"32:23:2","nodes":[],"literals":["solidity","^","0.8",".0"]},{"id":103,"nodeType":"ContractDefinition","src":"57:259:2","nodes":[{"id":76,"nodeType":"EventDefinition","src":"90:16:2","nod
es":[],"anonymous":false,"eventSelector":"3d53a39550e04688065827f3bb86584cb007ab9ebca7ebd528e7301c9c31eb5d","name":"Trigger","nameLocation":"96:7:2","parameters":{"id":75,"nodeType":"ParameterList","parameters":[],"src":"103:2:2"}},{"id":83,"nodeType":"FunctionDefinition","src":"112:45:2","nodes":[],"body":{"id":82,"nodeType":"Block","src":"126:31:2","nodes":[],"statements":[{"eventCall":{"arguments":[],"expression":{"argumentTypes":[],"id":79,"name":"Trigger","nodeType":"Identifier","overloadedDeclarations":[],"referencedDeclaration":76,"src":"141:7:2","typeDescriptions":{"typeIdentifier":"t_function_event_nonpayable$__$returns$__$","typeString":"function ()"}},"id":80,"isConstant":false,"isLValue":false,"isPure":false,"kind":"functionCall","lValueRequested":false,"nameLocations":[],"names":[],"nodeType":"FunctionCall","src":"141:9:2","tryCall":false,"typeDescriptions":{"typeIdentifier":"t_tuple$__$","typeString":"tuple()"}},"id":81,"nodeType":"EmitStatement","src":"136:14:2"}]},"implemented":true,"kind":"constructor","modifiers":[],"name":"","nameLocation":"-1:-1:-1","parameters":{"id":77,"nodeType":"ParameterList","parameters":[],"src":"123:2:2"},"returnParameters":{"id":78,"nodeType":"ParameterList","parameters":[],"src":"126:0:2"},"scope":103,"stateMutability":"nonpayable","virtual":false,"visibility":"public"},{"id":102,"nodeType":"FunctionDefinition","src":"163:151:2","nodes":[],"body":{"id":101,"nodeType":"Block","src":"221:93:2","nodes":[],"statements":[{"expression":{"arguments":[{"commonType":{"typeIdentifier":"t_uint256","typeString":"uint256"},"id":93,"isConstant":false,"isLValue":false,"isPure":false,"lValueRequested":false,"leftExpression":{"id":91,"name":"value","nodeType":"Identifier","overloadedDeclarations":[],"referencedDeclaration":85,"src":"239:5:2","typeDescriptions":{"typeIdentifier":"t_uint256","typeString":"uint256"}},"nodeType":"BinaryOperation","operator":"<","rightExpression":{"hexValue":"3130","id":92,"isConstant":false,"isLValue":false
,"isPure":true,"kind":"number","lValueRequested":false,"nodeType":"Literal","src":"247:2:2","typeDescriptions":{"typeIdentifier":"t_rational_10_by_1","typeString":"int_const 10"},"value":"10"},"src":"239:10:2","typeDescriptions":{"typeIdentifier":"t_bool","typeString":"bool"}},{"hexValue":"63616e206f6e6c792068616e646c652076616c756573203c203130","id":94,"isConstant":false,"isLValue":false,"isPure":true,"kind":"string","lValueRequested":false,"nodeType":"Literal","src":"251:29:2","typeDescriptions":{"typeIdentifier":"t_stringliteral_578cd1fc098748633f5d7d46bba428bb3129c1e63324f2b7151699cae5146449","typeString":"literal_string \"can only handle values < 10\""},"value":"can only handle values < 10"}],"expression":{"argumentTypes":[{"typeIdentifier":"t_bool","typeString":"bool"},{"typeIdentifier":"t_stringliteral_578cd1fc098748633f5d7d46bba428bb3129c1e63324f2b7151699cae5146449","typeString":"literal_string \"can only handle values < 10\""}],"id":90,"name":"require","nodeType":"Identifier","overloadedDeclarations":[-18,-18],"referencedDeclaration":-18,"src":"231:7:2","typeDescriptions":{"typeIdentifier":"t_function_require_pure$_t_bool_$_t_string_memory_ptr_$returns$__$","typeString":"function (bool,string memory) 
pure"}},"id":95,"isConstant":false,"isLValue":false,"isPure":false,"kind":"functionCall","lValueRequested":false,"nameLocations":[],"names":[],"nodeType":"FunctionCall","src":"231:50:2","tryCall":false,"typeDescriptions":{"typeIdentifier":"t_tuple$__$","typeString":"tuple()"}},"id":96,"nodeType":"ExpressionStatement","src":"231:50:2"},{"expression":{"commonType":{"typeIdentifier":"t_uint256","typeString":"uint256"},"id":99,"isConstant":false,"isLValue":false,"isPure":false,"lValueRequested":false,"leftExpression":{"id":97,"name":"value","nodeType":"Identifier","overloadedDeclarations":[],"referencedDeclaration":85,"src":"298:5:2","typeDescriptions":{"typeIdentifier":"t_uint256","typeString":"uint256"}},"nodeType":"BinaryOperation","operator":"+","rightExpression":{"hexValue":"31","id":98,"isConstant":false,"isLValue":false,"isPure":true,"kind":"number","lValueRequested":false,"nodeType":"Literal","src":"306:1:2","typeDescriptions":{"typeIdentifier":"t_rational_1_by_1","typeString":"int_const 
1"},"value":"1"},"src":"298:9:2","typeDescriptions":{"typeIdentifier":"t_uint256","typeString":"uint256"}},"functionReturnParameters":89,"id":100,"nodeType":"Return","src":"291:16:2"}]},"functionSelector":"812600df","implemented":true,"kind":"function","modifiers":[],"name":"inc","nameLocation":"172:3:2","parameters":{"id":86,"nodeType":"ParameterList","parameters":[{"constant":false,"id":85,"mutability":"mutable","name":"value","nameLocation":"184:5:2","nodeType":"VariableDeclaration","scope":102,"src":"176:13:2","stateVariable":false,"storageLocation":"default","typeDescriptions":{"typeIdentifier":"t_uint256","typeString":"uint256"},"typeName":{"id":84,"name":"uint256","nodeType":"ElementaryTypeName","src":"176:7:2","typeDescriptions":{"typeIdentifier":"t_uint256","typeString":"uint256"}},"visibility":"internal"}],"src":"175:15:2"},"returnParameters":{"id":89,"nodeType":"ParameterList","parameters":[{"constant":false,"id":88,"mutability":"mutable","name":"","nameLocation":"-1:-1:-1","nodeType":"VariableDeclaration","scope":102,"src":"212:7:2","stateVariable":false,"storageLocation":"default","typeDescriptions":{"typeIdentifier":"t_uint256","typeString":"uint256"},"typeName":{"id":87,"name":"uint256","nodeType":"ElementaryTypeName","src":"212:7:2","typeDescriptions":{"typeIdentifier":"t_uint256","typeString":"uint256"}},"visibility":"internal"}],"src":"211:9:2"},"scope":103,"stateMutability":"pure","virtual":false,"visibility":"public"}],"abstract":false,"baseContracts":[],"canonicalName":"RevertingContract","contractDependencies":[],"contractKind":"contract","fullyImplemented":true,"linearizedBaseContracts":[103],"name":"RevertingContract","nameLocation":"66:17:2","scope":104,"usedErrors":[]}],"license":"MIT"},"id":2} \ No newline at end of file diff --git a/tests/contracts/out/SimpleContract.sol/SimpleContract.json b/tests/contracts/out/SimpleContract.sol/SimpleContract.json index 4839740968c..57eb93d7eee 100644 --- 
a/tests/contracts/out/SimpleContract.sol/SimpleContract.json +++ b/tests/contracts/out/SimpleContract.sol/SimpleContract.json @@ -1,845 +1 @@ -{ - "abi": [ - { "type": "constructor", "inputs": [], "stateMutability": "nonpayable" }, - { - "type": "function", - "name": "emitAnotherTrigger", - "inputs": [ - { "name": "a", "type": "uint256", "internalType": "uint256" }, - { "name": "b", "type": "uint256", "internalType": "uint256" }, - { "name": "c", "type": "uint256", "internalType": "uint256" }, - { "name": "data", "type": "string", "internalType": "string" } - ], - "outputs": [], - "stateMutability": "nonpayable" - }, - { - "type": "function", - "name": "emitTrigger", - "inputs": [{ "name": "x", "type": "uint16", "internalType": "uint16" }], - "outputs": [], - "stateMutability": "nonpayable" - }, - { - "type": "event", - "name": "AnotherTrigger", - "inputs": [ - { - "name": "a", - "type": "uint256", - "indexed": true, - "internalType": "uint256" - }, - { - "name": "b", - "type": "uint256", - "indexed": true, - "internalType": "uint256" - }, - { - "name": "c", - "type": "uint256", - "indexed": true, - "internalType": "uint256" - }, - { - "name": "data", - "type": "string", - "indexed": false, - "internalType": "string" - } - ], - "anonymous": false - }, - { - "type": "event", - "name": "Trigger", - "inputs": [ - { - "name": "x", - "type": "uint16", - "indexed": false, - "internalType": "uint16" - } - ], - "anonymous": false - } - ], - "bytecode": { - "object": 
"0x608060405234801561001057600080fd5b50604051600081527f166a7d625edff952ff346d1bca4edef10254353f72916b7fb072d55d0f97b5449060200160405180910390a1610270806100546000396000f3fe608060405234801561001057600080fd5b50600436106100365760003560e01c806316d04e0d1461003b578063931919ea14610050575b600080fd5b61004e6100493660046100dd565b610063565b005b61004e61005e36600461011e565b61009d565b60405161ffff821681527f166a7d625edff952ff346d1bca4edef10254353f72916b7fb072d55d0f97b5449060200160405180910390a150565b8183857f2cb351db58390c313534745d80b5f0abff9230502a6374a97b9caa76b31c5d8a846040516100cf91906101ec565b60405180910390a450505050565b6000602082840312156100ef57600080fd5b813561ffff8116811461010157600080fd5b9392505050565b634e487b7160e01b600052604160045260246000fd5b6000806000806080858703121561013457600080fd5b843593506020850135925060408501359150606085013567ffffffffffffffff8082111561016157600080fd5b818701915087601f83011261017557600080fd5b81358181111561018757610187610108565b604051601f8201601f19908116603f011681019083821181831017156101af576101af610108565b816040528281528a60208487010111156101c857600080fd5b82602086016020830137600060208483010152809550505050505092959194509250565b600060208083528351808285015260005b81811015610219578581018301518582016040015282016101fd565b506000604082860101526040601f19601f830116850101925050509291505056fea264697066735822122051969b527a63ab67686e528eb2de0bd24f1a84835193586c0318cfb81b2cb0ac64736f6c63430008130033", - "sourceMap": "57:596:0:-:0;;;308:46;;;;;;;;;-1:-1:-1;337:10:0;;345:1;167:38:1;;337:10:0;;155:2:1;140:18;337:10:0;;;;;;;57:596;;;;;;", - "linkReferences": {} - }, - "deployedBytecode": { - "object": 
"0x608060405234801561001057600080fd5b50600436106100365760003560e01c806316d04e0d1461003b578063931919ea14610050575b600080fd5b61004e6100493660046100dd565b610063565b005b61004e61005e36600461011e565b61009d565b60405161ffff821681527f166a7d625edff952ff346d1bca4edef10254353f72916b7fb072d55d0f97b5449060200160405180910390a150565b8183857f2cb351db58390c313534745d80b5f0abff9230502a6374a97b9caa76b31c5d8a846040516100cf91906101ec565b60405180910390a450505050565b6000602082840312156100ef57600080fd5b813561ffff8116811461010157600080fd5b9392505050565b634e487b7160e01b600052604160045260246000fd5b6000806000806080858703121561013457600080fd5b843593506020850135925060408501359150606085013567ffffffffffffffff8082111561016157600080fd5b818701915087601f83011261017557600080fd5b81358181111561018757610187610108565b604051601f8201601f19908116603f011681019083821181831017156101af576101af610108565b816040528281528a60208487010111156101c857600080fd5b82602086016020830137600060208483010152809550505050505092959194509250565b600060208083528351808285015260005b81811015610219578581018301518582016040015282016101fd565b506000604082860101526040601f19601f830116850101925050509291505056fea264697066735822122051969b527a63ab67686e528eb2de0bd24f1a84835193586c0318cfb81b2cb0ac64736f6c63430008130033", - "sourceMap": 
"57:596:0:-:0;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;360:70;;;;;;:::i;:::-;;:::i;:::-;;474:177;;;;;;:::i;:::-;;:::i;360:70::-;413:10;;1729:6:1;1717:19;;1699:38;;413:10:0;;1687:2:1;1672:18;413:10:0;;;;;;;360:70;:::o;474:177::-;636:1;633;630;615:29;639:4;615:29;;;;;;:::i;:::-;;;;;;;;474:177;;;;:::o;14:272:1:-;72:6;125:2;113:9;104:7;100:23;96:32;93:52;;;141:1;138;131:12;93:52;180:9;167:23;230:6;223:5;219:18;212:5;209:29;199:57;;252:1;249;242:12;199:57;275:5;14:272;-1:-1:-1;;;14:272:1:o;291:127::-;352:10;347:3;343:20;340:1;333:31;383:4;380:1;373:15;407:4;404:1;397:15;423:1127;519:6;527;535;543;596:3;584:9;575:7;571:23;567:33;564:53;;;613:1;610;603:12;564:53;649:9;636:23;626:33;;706:2;695:9;691:18;678:32;668:42;;757:2;746:9;742:18;729:32;719:42;;812:2;801:9;797:18;784:32;835:18;876:2;868:6;865:14;862:34;;;892:1;889;882:12;862:34;930:6;919:9;915:22;905:32;;975:7;968:4;964:2;960:13;956:27;946:55;;997:1;994;987:12;946:55;1033:2;1020:16;1055:2;1051;1048:10;1045:36;;;1061:18;;:::i;:::-;1136:2;1130:9;1104:2;1190:13;;-1:-1:-1;;1186:22:1;;;1210:2;1182:31;1178:40;1166:53;;;1234:18;;;1254:22;;;1231:46;1228:72;;;1280:18;;:::i;:::-;1320:10;1316:2;1309:22;1355:2;1347:6;1340:18;1395:7;1390:2;1385;1381;1377:11;1373:20;1370:33;1367:53;;;1416:1;1413;1406:12;1367:53;1472:2;1467;1463;1459:11;1454:2;1446:6;1442:15;1429:46;1517:1;1512:2;1507;1499:6;1495:15;1491:24;1484:35;1538:6;1528:16;;;;;;;423:1127;;;;;;;:::o;1748:548::-;1860:4;1889:2;1918;1907:9;1900:21;1950:6;1944:13;1993:6;1988:2;1977:9;1973:18;1966:34;2018:1;2028:140;2042:6;2039:1;2036:13;2028:140;;;2137:14;;;2133:23;;2127:30;2103:17;;;2122:2;2099:26;2092:66;2057:10;;2028:140;;;2032:3;2217:1;2212:2;2203:6;2192:9;2188:22;2184:31;2177:42;2287:2;2280;2276:7;2271:2;2263:6;2259:15;2255:29;2244:9;2240:45;2236:54;2228:62;;;;1748:548;;;;:::o", - "linkReferences": {} - }, - "methodIdentifiers": { - "emitAnotherTrigger(uint256,uint256,uint256,string)": "931919ea", - "emitTrigger(uint16)": "16d04e0d" - }, - "rawMetadata": 
"{\"compiler\":{\"version\":\"0.8.19+commit.7dd6d404\"},\"language\":\"Solidity\",\"output\":{\"abi\":[{\"inputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"a\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"b\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"c\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"string\",\"name\":\"data\",\"type\":\"string\"}],\"name\":\"AnotherTrigger\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint16\",\"name\":\"x\",\"type\":\"uint16\"}],\"name\":\"Trigger\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"a\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"b\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"c\",\"type\":\"uint256\"},{\"internalType\":\"string\",\"name\":\"data\",\"type\":\"string\"}],\"name\":\"emitAnotherTrigger\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint16\",\"name\":\"x\",\"type\":\"uint16\"}],\"name\":\"emitTrigger\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}],\"devdoc\":{\"kind\":\"dev\",\"methods\":{},\"version\":1},\"userdoc\":{\"kind\":\"user\",\"methods\":{},\"version\":1}},\"settings\":{\"compilationTarget\":{\"src/SimpleContract.sol\":\"SimpleContract\"},\"evmVersion\":\"paris\",\"libraries\":{},\"metadata\":{\"bytecodeHash\":\"ipfs\"},\"optimizer\":{\"enabled\":true,\"runs\":200},\"remappings\":[]},\"sources\":{\"src/SimpleContract.sol\":{\"keccak256\":\"0xda954fc2eb36f5f3658f71e59fdb487c6f8947efa45e5e3fb7038c7faff99de0\",\"license\":\"MIT\",\"urls\":[\"bzz-raw://e8253c13afee68eee23965caf364c3812ca6065eac5655faf9c20d9f231b9b1d\",\"dweb:/ipfs/QmXPdwfDAMniiwJHPt2WBvaT5gK1LUK3aM81Jq5m3n8UPF\"]}},\"version\":1}", - 
"metadata": { - "compiler": { "version": "0.8.19+commit.7dd6d404" }, - "language": "Solidity", - "output": { - "abi": [ - { - "inputs": [], - "stateMutability": "nonpayable", - "type": "constructor" - }, - { - "inputs": [ - { - "internalType": "uint256", - "name": "a", - "type": "uint256", - "indexed": true - }, - { - "internalType": "uint256", - "name": "b", - "type": "uint256", - "indexed": true - }, - { - "internalType": "uint256", - "name": "c", - "type": "uint256", - "indexed": true - }, - { - "internalType": "string", - "name": "data", - "type": "string", - "indexed": false - } - ], - "type": "event", - "name": "AnotherTrigger", - "anonymous": false - }, - { - "inputs": [ - { - "internalType": "uint16", - "name": "x", - "type": "uint16", - "indexed": false - } - ], - "type": "event", - "name": "Trigger", - "anonymous": false - }, - { - "inputs": [ - { "internalType": "uint256", "name": "a", "type": "uint256" }, - { "internalType": "uint256", "name": "b", "type": "uint256" }, - { "internalType": "uint256", "name": "c", "type": "uint256" }, - { "internalType": "string", "name": "data", "type": "string" } - ], - "stateMutability": "nonpayable", - "type": "function", - "name": "emitAnotherTrigger" - }, - { - "inputs": [ - { "internalType": "uint16", "name": "x", "type": "uint16" } - ], - "stateMutability": "nonpayable", - "type": "function", - "name": "emitTrigger" - } - ], - "devdoc": { "kind": "dev", "methods": {}, "version": 1 }, - "userdoc": { "kind": "user", "methods": {}, "version": 1 } - }, - "settings": { - "remappings": [], - "optimizer": { "enabled": true, "runs": 200 }, - "metadata": { "bytecodeHash": "ipfs" }, - "compilationTarget": { "src/SimpleContract.sol": "SimpleContract" }, - "evmVersion": "paris", - "libraries": {} - }, - "sources": { - "src/SimpleContract.sol": { - "keccak256": "0xda954fc2eb36f5f3658f71e59fdb487c6f8947efa45e5e3fb7038c7faff99de0", - "urls": [ - "bzz-raw://e8253c13afee68eee23965caf364c3812ca6065eac5655faf9c20d9f231b9b1d", - 
"dweb:/ipfs/QmXPdwfDAMniiwJHPt2WBvaT5gK1LUK3aM81Jq5m3n8UPF" - ], - "license": "MIT" - } - }, - "version": 1 - }, - "ast": { - "absolutePath": "src/SimpleContract.sol", - "id": 54, - "exportedSymbols": { "SimpleContract": [53] }, - "nodeType": "SourceUnit", - "src": "32:622:0", - "nodes": [ - { - "id": 1, - "nodeType": "PragmaDirective", - "src": "32:23:0", - "nodes": [], - "literals": ["solidity", "^", "0.8", ".0"] - }, - { - "id": 53, - "nodeType": "ContractDefinition", - "src": "57:596:0", - "nodes": [ - { - "id": 5, - "nodeType": "EventDefinition", - "src": "87:24:0", - "nodes": [], - "anonymous": false, - "eventSelector": "166a7d625edff952ff346d1bca4edef10254353f72916b7fb072d55d0f97b544", - "name": "Trigger", - "nameLocation": "93:7:0", - "parameters": { - "id": 4, - "nodeType": "ParameterList", - "parameters": [ - { - "constant": false, - "id": 3, - "indexed": false, - "mutability": "mutable", - "name": "x", - "nameLocation": "108:1:0", - "nodeType": "VariableDeclaration", - "scope": 5, - "src": "101:8:0", - "stateVariable": false, - "storageLocation": "default", - "typeDescriptions": { - "typeIdentifier": "t_uint16", - "typeString": "uint16" - }, - "typeName": { - "id": 2, - "name": "uint16", - "nodeType": "ElementaryTypeName", - "src": "101:6:0", - "typeDescriptions": { - "typeIdentifier": "t_uint16", - "typeString": "uint16" - } - }, - "visibility": "internal" - } - ], - "src": "100:10:0" - } - }, - { - "id": 15, - "nodeType": "EventDefinition", - "src": "173:129:0", - "nodes": [], - "anonymous": false, - "eventSelector": "2cb351db58390c313534745d80b5f0abff9230502a6374a97b9caa76b31c5d8a", - "name": "AnotherTrigger", - "nameLocation": "179:14:0", - "parameters": { - "id": 14, - "nodeType": "ParameterList", - "parameters": [ - { - "constant": false, - "id": 7, - "indexed": true, - "mutability": "mutable", - "name": "a", - "nameLocation": "219:1:0", - "nodeType": "VariableDeclaration", - "scope": 15, - "src": "203:17:0", - "stateVariable": false, - 
"storageLocation": "default", - "typeDescriptions": { - "typeIdentifier": "t_uint256", - "typeString": "uint256" - }, - "typeName": { - "id": 6, - "name": "uint256", - "nodeType": "ElementaryTypeName", - "src": "203:7:0", - "typeDescriptions": { - "typeIdentifier": "t_uint256", - "typeString": "uint256" - } - }, - "visibility": "internal" - }, - { - "constant": false, - "id": 9, - "indexed": true, - "mutability": "mutable", - "name": "b", - "nameLocation": "246:1:0", - "nodeType": "VariableDeclaration", - "scope": 15, - "src": "230:17:0", - "stateVariable": false, - "storageLocation": "default", - "typeDescriptions": { - "typeIdentifier": "t_uint256", - "typeString": "uint256" - }, - "typeName": { - "id": 8, - "name": "uint256", - "nodeType": "ElementaryTypeName", - "src": "230:7:0", - "typeDescriptions": { - "typeIdentifier": "t_uint256", - "typeString": "uint256" - } - }, - "visibility": "internal" - }, - { - "constant": false, - "id": 11, - "indexed": true, - "mutability": "mutable", - "name": "c", - "nameLocation": "273:1:0", - "nodeType": "VariableDeclaration", - "scope": 15, - "src": "257:17:0", - "stateVariable": false, - "storageLocation": "default", - "typeDescriptions": { - "typeIdentifier": "t_uint256", - "typeString": "uint256" - }, - "typeName": { - "id": 10, - "name": "uint256", - "nodeType": "ElementaryTypeName", - "src": "257:7:0", - "typeDescriptions": { - "typeIdentifier": "t_uint256", - "typeString": "uint256" - } - }, - "visibility": "internal" - }, - { - "constant": false, - "id": 13, - "indexed": false, - "mutability": "mutable", - "name": "data", - "nameLocation": "291:4:0", - "nodeType": "VariableDeclaration", - "scope": 15, - "src": "284:11:0", - "stateVariable": false, - "storageLocation": "default", - "typeDescriptions": { - "typeIdentifier": "t_string_memory_ptr", - "typeString": "string" - }, - "typeName": { - "id": 12, - "name": "string", - "nodeType": "ElementaryTypeName", - "src": "284:6:0", - "typeDescriptions": { - 
"typeIdentifier": "t_string_storage_ptr", - "typeString": "string" - } - }, - "visibility": "internal" - } - ], - "src": "193:108:0" - } - }, - { - "id": 23, - "nodeType": "FunctionDefinition", - "src": "308:46:0", - "nodes": [], - "body": { - "id": 22, - "nodeType": "Block", - "src": "322:32:0", - "nodes": [], - "statements": [ - { - "eventCall": { - "arguments": [ - { - "hexValue": "30", - "id": 19, - "isConstant": false, - "isLValue": false, - "isPure": true, - "kind": "number", - "lValueRequested": false, - "nodeType": "Literal", - "src": "345:1:0", - "typeDescriptions": { - "typeIdentifier": "t_rational_0_by_1", - "typeString": "int_const 0" - }, - "value": "0" - } - ], - "expression": { - "argumentTypes": [ - { - "typeIdentifier": "t_rational_0_by_1", - "typeString": "int_const 0" - } - ], - "id": 18, - "name": "Trigger", - "nodeType": "Identifier", - "overloadedDeclarations": [], - "referencedDeclaration": 5, - "src": "337:7:0", - "typeDescriptions": { - "typeIdentifier": "t_function_event_nonpayable$_t_uint16_$returns$__$", - "typeString": "function (uint16)" - } - }, - "id": 20, - "isConstant": false, - "isLValue": false, - "isPure": false, - "kind": "functionCall", - "lValueRequested": false, - "nameLocations": [], - "names": [], - "nodeType": "FunctionCall", - "src": "337:10:0", - "tryCall": false, - "typeDescriptions": { - "typeIdentifier": "t_tuple$__$", - "typeString": "tuple()" - } - }, - "id": 21, - "nodeType": "EmitStatement", - "src": "332:15:0" - } - ] - }, - "implemented": true, - "kind": "constructor", - "modifiers": [], - "name": "", - "nameLocation": "-1:-1:-1", - "parameters": { - "id": 16, - "nodeType": "ParameterList", - "parameters": [], - "src": "319:2:0" - }, - "returnParameters": { - "id": 17, - "nodeType": "ParameterList", - "parameters": [], - "src": "322:0:0" - }, - "scope": 53, - "stateMutability": "nonpayable", - "virtual": false, - "visibility": "public" - }, - { - "id": 33, - "nodeType": "FunctionDefinition", - "src": 
"360:70:0", - "nodes": [], - "body": { - "id": 32, - "nodeType": "Block", - "src": "398:32:0", - "nodes": [], - "statements": [ - { - "eventCall": { - "arguments": [ - { - "id": 29, - "name": "x", - "nodeType": "Identifier", - "overloadedDeclarations": [], - "referencedDeclaration": 25, - "src": "421:1:0", - "typeDescriptions": { - "typeIdentifier": "t_uint16", - "typeString": "uint16" - } - } - ], - "expression": { - "argumentTypes": [ - { "typeIdentifier": "t_uint16", "typeString": "uint16" } - ], - "id": 28, - "name": "Trigger", - "nodeType": "Identifier", - "overloadedDeclarations": [], - "referencedDeclaration": 5, - "src": "413:7:0", - "typeDescriptions": { - "typeIdentifier": "t_function_event_nonpayable$_t_uint16_$returns$__$", - "typeString": "function (uint16)" - } - }, - "id": 30, - "isConstant": false, - "isLValue": false, - "isPure": false, - "kind": "functionCall", - "lValueRequested": false, - "nameLocations": [], - "names": [], - "nodeType": "FunctionCall", - "src": "413:10:0", - "tryCall": false, - "typeDescriptions": { - "typeIdentifier": "t_tuple$__$", - "typeString": "tuple()" - } - }, - "id": 31, - "nodeType": "EmitStatement", - "src": "408:15:0" - } - ] - }, - "functionSelector": "16d04e0d", - "implemented": true, - "kind": "function", - "modifiers": [], - "name": "emitTrigger", - "nameLocation": "369:11:0", - "parameters": { - "id": 26, - "nodeType": "ParameterList", - "parameters": [ - { - "constant": false, - "id": 25, - "mutability": "mutable", - "name": "x", - "nameLocation": "388:1:0", - "nodeType": "VariableDeclaration", - "scope": 33, - "src": "381:8:0", - "stateVariable": false, - "storageLocation": "default", - "typeDescriptions": { - "typeIdentifier": "t_uint16", - "typeString": "uint16" - }, - "typeName": { - "id": 24, - "name": "uint16", - "nodeType": "ElementaryTypeName", - "src": "381:6:0", - "typeDescriptions": { - "typeIdentifier": "t_uint16", - "typeString": "uint16" - } - }, - "visibility": "internal" - } - ], - "src": 
"380:10:0" - }, - "returnParameters": { - "id": 27, - "nodeType": "ParameterList", - "parameters": [], - "src": "398:0:0" - }, - "scope": 53, - "stateMutability": "nonpayable", - "virtual": false, - "visibility": "public" - }, - { - "id": 52, - "nodeType": "FunctionDefinition", - "src": "474:177:0", - "nodes": [], - "body": { - "id": 51, - "nodeType": "Block", - "src": "600:51:0", - "nodes": [], - "statements": [ - { - "eventCall": { - "arguments": [ - { - "id": 45, - "name": "a", - "nodeType": "Identifier", - "overloadedDeclarations": [], - "referencedDeclaration": 35, - "src": "630:1:0", - "typeDescriptions": { - "typeIdentifier": "t_uint256", - "typeString": "uint256" - } - }, - { - "id": 46, - "name": "b", - "nodeType": "Identifier", - "overloadedDeclarations": [], - "referencedDeclaration": 37, - "src": "633:1:0", - "typeDescriptions": { - "typeIdentifier": "t_uint256", - "typeString": "uint256" - } - }, - { - "id": 47, - "name": "c", - "nodeType": "Identifier", - "overloadedDeclarations": [], - "referencedDeclaration": 39, - "src": "636:1:0", - "typeDescriptions": { - "typeIdentifier": "t_uint256", - "typeString": "uint256" - } - }, - { - "id": 48, - "name": "data", - "nodeType": "Identifier", - "overloadedDeclarations": [], - "referencedDeclaration": 41, - "src": "639:4:0", - "typeDescriptions": { - "typeIdentifier": "t_string_memory_ptr", - "typeString": "string memory" - } - } - ], - "expression": { - "argumentTypes": [ - { - "typeIdentifier": "t_uint256", - "typeString": "uint256" - }, - { - "typeIdentifier": "t_uint256", - "typeString": "uint256" - }, - { - "typeIdentifier": "t_uint256", - "typeString": "uint256" - }, - { - "typeIdentifier": "t_string_memory_ptr", - "typeString": "string memory" - } - ], - "id": 44, - "name": "AnotherTrigger", - "nodeType": "Identifier", - "overloadedDeclarations": [], - "referencedDeclaration": 15, - "src": "615:14:0", - "typeDescriptions": { - "typeIdentifier": 
"t_function_event_nonpayable$_t_uint256_$_t_uint256_$_t_uint256_$_t_string_memory_ptr_$returns$__$", - "typeString": "function (uint256,uint256,uint256,string memory)" - } - }, - "id": 49, - "isConstant": false, - "isLValue": false, - "isPure": false, - "kind": "functionCall", - "lValueRequested": false, - "nameLocations": [], - "names": [], - "nodeType": "FunctionCall", - "src": "615:29:0", - "tryCall": false, - "typeDescriptions": { - "typeIdentifier": "t_tuple$__$", - "typeString": "tuple()" - } - }, - "id": 50, - "nodeType": "EmitStatement", - "src": "610:34:0" - } - ] - }, - "functionSelector": "931919ea", - "implemented": true, - "kind": "function", - "modifiers": [], - "name": "emitAnotherTrigger", - "nameLocation": "483:18:0", - "parameters": { - "id": 42, - "nodeType": "ParameterList", - "parameters": [ - { - "constant": false, - "id": 35, - "mutability": "mutable", - "name": "a", - "nameLocation": "519:1:0", - "nodeType": "VariableDeclaration", - "scope": 52, - "src": "511:9:0", - "stateVariable": false, - "storageLocation": "default", - "typeDescriptions": { - "typeIdentifier": "t_uint256", - "typeString": "uint256" - }, - "typeName": { - "id": 34, - "name": "uint256", - "nodeType": "ElementaryTypeName", - "src": "511:7:0", - "typeDescriptions": { - "typeIdentifier": "t_uint256", - "typeString": "uint256" - } - }, - "visibility": "internal" - }, - { - "constant": false, - "id": 37, - "mutability": "mutable", - "name": "b", - "nameLocation": "538:1:0", - "nodeType": "VariableDeclaration", - "scope": 52, - "src": "530:9:0", - "stateVariable": false, - "storageLocation": "default", - "typeDescriptions": { - "typeIdentifier": "t_uint256", - "typeString": "uint256" - }, - "typeName": { - "id": 36, - "name": "uint256", - "nodeType": "ElementaryTypeName", - "src": "530:7:0", - "typeDescriptions": { - "typeIdentifier": "t_uint256", - "typeString": "uint256" - } - }, - "visibility": "internal" - }, - { - "constant": false, - "id": 39, - "mutability": "mutable", - 
"name": "c", - "nameLocation": "557:1:0", - "nodeType": "VariableDeclaration", - "scope": 52, - "src": "549:9:0", - "stateVariable": false, - "storageLocation": "default", - "typeDescriptions": { - "typeIdentifier": "t_uint256", - "typeString": "uint256" - }, - "typeName": { - "id": 38, - "name": "uint256", - "nodeType": "ElementaryTypeName", - "src": "549:7:0", - "typeDescriptions": { - "typeIdentifier": "t_uint256", - "typeString": "uint256" - } - }, - "visibility": "internal" - }, - { - "constant": false, - "id": 41, - "mutability": "mutable", - "name": "data", - "nameLocation": "582:4:0", - "nodeType": "VariableDeclaration", - "scope": 52, - "src": "568:18:0", - "stateVariable": false, - "storageLocation": "memory", - "typeDescriptions": { - "typeIdentifier": "t_string_memory_ptr", - "typeString": "string" - }, - "typeName": { - "id": 40, - "name": "string", - "nodeType": "ElementaryTypeName", - "src": "568:6:0", - "typeDescriptions": { - "typeIdentifier": "t_string_storage_ptr", - "typeString": "string" - } - }, - "visibility": "internal" - } - ], - "src": "501:91:0" - }, - "returnParameters": { - "id": 43, - "nodeType": "ParameterList", - "parameters": [], - "src": "600:0:0" - }, - "scope": 53, - "stateMutability": "nonpayable", - "virtual": false, - "visibility": "public" - } - ], - "abstract": false, - "baseContracts": [], - "canonicalName": "SimpleContract", - "contractDependencies": [], - "contractKind": "contract", - "fullyImplemented": true, - "linearizedBaseContracts": [53], - "name": "SimpleContract", - "nameLocation": "66:14:0", - "scope": 54, - "usedErrors": [] - } - ], - "license": "MIT" - }, - "id": 0 -} 
+{"abi":[{"type":"constructor","inputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"emitAnotherTrigger","inputs":[{"name":"a","type":"uint256","internalType":"uint256"},{"name":"b","type":"uint256","internalType":"uint256"},{"name":"c","type":"uint256","internalType":"uint256"},{"name":"data","type":"string","internalType":"string"}],"outputs":[],"stateMutability":"nonpayable"},{"type":"function","name":"emitTrigger","inputs":[{"name":"x","type":"uint16","internalType":"uint16"}],"outputs":[],"stateMutability":"nonpayable"},{"type":"event","name":"AnotherTrigger","inputs":[{"name":"a","type":"uint256","indexed":true,"internalType":"uint256"},{"name":"b","type":"uint256","indexed":true,"internalType":"uint256"},{"name":"c","type":"uint256","indexed":true,"internalType":"uint256"},{"name":"data","type":"string","indexed":false,"internalType":"string"}],"anonymous":false},{"type":"event","name":"Trigger","inputs":[{"name":"x","type":"uint16","indexed":false,"internalType":"uint16"}],"anonymous":false}],"bytecode":{"object":"0x608060405234801561001057600080fd5b50604051600081527f166a7d625edff952ff346d1bca4edef10254353f72916b7fb072d55d0f97b5449060200160405180910390a1610270806100546000396000f3fe608060405234801561001057600080fd5b50600436106100365760003560e01c806316d04e0d1461003b578063931919ea14610050575b600080fd5b61004e6100493660046100dd565b610063565b005b61004e61005e36600461011e565b61009d565b60405161ffff821681527f166a7d625edff952ff346d1bca4edef10254353f72916b7fb072d55d0f97b5449060200160405180910390a150565b8183857f2cb351db58390c313534745d80b5f0abff9230502a6374a97b9caa76b31c5d8a846040516100cf91906101ec565b60405180910390a450505050565b6000602082840312156100ef57600080fd5b813561ffff8116811461010157600080fd5b9392505050565b634e487b7160e01b600052604160045260246000fd5b6000806000806080858703121561013457600080fd5b843593506020850135925060408501359150606085013567ffffffffffffffff8082111561016157600080fd5b818701915087601f83011261017557600080fd5b813581811115610187576101876
10108565b604051601f8201601f19908116603f011681019083821181831017156101af576101af610108565b816040528281528a60208487010111156101c857600080fd5b82602086016020830137600060208483010152809550505050505092959194509250565b600060208083528351808285015260005b81811015610219578581018301518582016040015282016101fd565b506000604082860101526040601f19601f830116850101925050509291505056fea264697066735822122051969b527a63ab67686e528eb2de0bd24f1a84835193586c0318cfb81b2cb0ac64736f6c63430008130033","sourceMap":"57:596:3:-:0;;;308:46;;;;;;;;;-1:-1:-1;337:10:3;;345:1;167:38:4;;337:10:3;;155:2:4;140:18;337:10:3;;;;;;;57:596;;;;;;","linkReferences":{}},"deployedBytecode":{"object":"0x608060405234801561001057600080fd5b50600436106100365760003560e01c806316d04e0d1461003b578063931919ea14610050575b600080fd5b61004e6100493660046100dd565b610063565b005b61004e61005e36600461011e565b61009d565b60405161ffff821681527f166a7d625edff952ff346d1bca4edef10254353f72916b7fb072d55d0f97b5449060200160405180910390a150565b8183857f2cb351db58390c313534745d80b5f0abff9230502a6374a97b9caa76b31c5d8a846040516100cf91906101ec565b60405180910390a450505050565b6000602082840312156100ef57600080fd5b813561ffff8116811461010157600080fd5b9392505050565b634e487b7160e01b600052604160045260246000fd5b6000806000806080858703121561013457600080fd5b843593506020850135925060408501359150606085013567ffffffffffffffff8082111561016157600080fd5b818701915087601f83011261017557600080fd5b81358181111561018757610187610108565b604051601f8201601f19908116603f011681019083821181831017156101af576101af610108565b816040528281528a60208487010111156101c857600080fd5b82602086016020830137600060208483010152809550505050505092959194509250565b600060208083528351808285015260005b81811015610219578581018301518582016040015282016101fd565b506000604082860101526040601f19601f830116850101925050509291505056fea264697066735822122051969b527a63ab67686e528eb2de0bd24f1a84835193586c0318cfb81b2cb0ac64736f6c63430008130033","sourceMap":"57:596:3:-:0;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;360:70;;;;;;:::i;:::-;;:::i;:
::-;;474:177;;;;;;:::i;:::-;;:::i;360:70::-;413:10;;1729:6:4;1717:19;;1699:38;;413:10:3;;1687:2:4;1672:18;413:10:3;;;;;;;360:70;:::o;474:177::-;636:1;633;630;615:29;639:4;615:29;;;;;;:::i;:::-;;;;;;;;474:177;;;;:::o;14:272:4:-;72:6;125:2;113:9;104:7;100:23;96:32;93:52;;;141:1;138;131:12;93:52;180:9;167:23;230:6;223:5;219:18;212:5;209:29;199:57;;252:1;249;242:12;199:57;275:5;14:272;-1:-1:-1;;;14:272:4:o;291:127::-;352:10;347:3;343:20;340:1;333:31;383:4;380:1;373:15;407:4;404:1;397:15;423:1127;519:6;527;535;543;596:3;584:9;575:7;571:23;567:33;564:53;;;613:1;610;603:12;564:53;649:9;636:23;626:33;;706:2;695:9;691:18;678:32;668:42;;757:2;746:9;742:18;729:32;719:42;;812:2;801:9;797:18;784:32;835:18;876:2;868:6;865:14;862:34;;;892:1;889;882:12;862:34;930:6;919:9;915:22;905:32;;975:7;968:4;964:2;960:13;956:27;946:55;;997:1;994;987:12;946:55;1033:2;1020:16;1055:2;1051;1048:10;1045:36;;;1061:18;;:::i;:::-;1136:2;1130:9;1104:2;1190:13;;-1:-1:-1;;1186:22:4;;;1210:2;1182:31;1178:40;1166:53;;;1234:18;;;1254:22;;;1231:46;1228:72;;;1280:18;;:::i;:::-;1320:10;1316:2;1309:22;1355:2;1347:6;1340:18;1395:7;1390:2;1385;1381;1377:11;1373:20;1370:33;1367:53;;;1416:1;1413;1406:12;1367:53;1472:2;1467;1463;1459:11;1454:2;1446:6;1442:15;1429:46;1517:1;1512:2;1507;1499:6;1495:15;1491:24;1484:35;1538:6;1528:16;;;;;;;423:1127;;;;;;;:::o;1748:548::-;1860:4;1889:2;1918;1907:9;1900:21;1950:6;1944:13;1993:6;1988:2;1977:9;1973:18;1966:34;2018:1;2028:140;2042:6;2039:1;2036:13;2028:140;;;2137:14;;;2133:23;;2127:30;2103:17;;;2122:2;2099:26;2092:66;2057:10;;2028:140;;;2032:3;2217:1;2212:2;2203:6;2192:9;2188:22;2184:31;2177:42;2287:2;2280;2276:7;2271:2;2263:6;2259:15;2255:29;2244:9;2240:45;2236:54;2228:62;;;;1748:548;;;;:::o","linkReferences":{}},"methodIdentifiers":{"emitAnotherTrigger(uint256,uint256,uint256,string)":"931919ea","emitTrigger(uint16)":"16d04e0d"},"rawMetadata":"{\"compiler\":{\"version\":\"0.8.19+commit.7dd6d404\"},\"language\":\"Solidity\",\"output\":{\"abi\":[{\"inputs\":[],\"stateMutabi
lity\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"a\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"b\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"c\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"string\",\"name\":\"data\",\"type\":\"string\"}],\"name\":\"AnotherTrigger\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint16\",\"name\":\"x\",\"type\":\"uint16\"}],\"name\":\"Trigger\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"a\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"b\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"c\",\"type\":\"uint256\"},{\"internalType\":\"string\",\"name\":\"data\",\"type\":\"string\"}],\"name\":\"emitAnotherTrigger\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint16\",\"name\":\"x\",\"type\":\"uint16\"}],\"name\":\"emitTrigger\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}],\"devdoc\":{\"kind\":\"dev\",\"methods\":{},\"version\":1},\"userdoc\":{\"kind\":\"user\",\"methods\":{},\"version\":1}},\"settings\":{\"compilationTarget\":{\"src/SimpleContract.sol\":\"SimpleContract\"},\"evmVersion\":\"paris\",\"libraries\":{},\"metadata\":{\"bytecodeHash\":\"ipfs\"},\"optimizer\":{\"enabled\":true,\"runs\":200},\"remappings\":[]},\"sources\":{\"src/SimpleContract.sol\":{\"keccak256\":\"0xda954fc2eb36f5f3658f71e59fdb487c6f8947efa45e5e3fb7038c7faff99de0\",\"license\":\"MIT\",\"urls\":[\"bzz-raw://e8253c13afee68eee23965caf364c3812ca6065eac5655faf9c20d9f231b9b1d\",\"dweb:/ipfs/QmXPdwfDAMniiwJHPt2WBvaT5gK1LUK3aM81Jq5m3n8UPF\"]}},\"version\":1}","metadata":{"compiler":{"version":"0.8.19+commit.7dd6d404"},"language":"Solidity","output":{"abi":[{"inputs":[],"stateMutability":"nonpayab
le","type":"constructor"},{"inputs":[{"internalType":"uint256","name":"a","type":"uint256","indexed":true},{"internalType":"uint256","name":"b","type":"uint256","indexed":true},{"internalType":"uint256","name":"c","type":"uint256","indexed":true},{"internalType":"string","name":"data","type":"string","indexed":false}],"type":"event","name":"AnotherTrigger","anonymous":false},{"inputs":[{"internalType":"uint16","name":"x","type":"uint16","indexed":false}],"type":"event","name":"Trigger","anonymous":false},{"inputs":[{"internalType":"uint256","name":"a","type":"uint256"},{"internalType":"uint256","name":"b","type":"uint256"},{"internalType":"uint256","name":"c","type":"uint256"},{"internalType":"string","name":"data","type":"string"}],"stateMutability":"nonpayable","type":"function","name":"emitAnotherTrigger"},{"inputs":[{"internalType":"uint16","name":"x","type":"uint16"}],"stateMutability":"nonpayable","type":"function","name":"emitTrigger"}],"devdoc":{"kind":"dev","methods":{},"version":1},"userdoc":{"kind":"user","methods":{},"version":1}},"settings":{"remappings":[],"optimizer":{"enabled":true,"runs":200},"metadata":{"bytecodeHash":"ipfs"},"compilationTarget":{"src/SimpleContract.sol":"SimpleContract"},"evmVersion":"paris","libraries":{}},"sources":{"src/SimpleContract.sol":{"keccak256":"0xda954fc2eb36f5f3658f71e59fdb487c6f8947efa45e5e3fb7038c7faff99de0","urls":["bzz-raw://e8253c13afee68eee23965caf364c3812ca6065eac5655faf9c20d9f231b9b1d","dweb:/ipfs/QmXPdwfDAMniiwJHPt2WBvaT5gK1LUK3aM81Jq5m3n8UPF"],"license":"MIT"}},"version":1},"ast":{"absolutePath":"src/SimpleContract.sol","id":158,"exportedSymbols":{"SimpleContract":[157]},"nodeType":"SourceUnit","src":"32:622:3","nodes":[{"id":105,"nodeType":"PragmaDirective","src":"32:23:3","nodes":[],"literals":["solidity","^","0.8",".0"]},{"id":157,"nodeType":"ContractDefinition","src":"57:596:3","nodes":[{"id":109,"nodeType":"EventDefinition","src":"87:24:3","nodes":[],"anonymous":false,"eventSelector":"166a7d625edff952ff
346d1bca4edef10254353f72916b7fb072d55d0f97b544","name":"Trigger","nameLocation":"93:7:3","parameters":{"id":108,"nodeType":"ParameterList","parameters":[{"constant":false,"id":107,"indexed":false,"mutability":"mutable","name":"x","nameLocation":"108:1:3","nodeType":"VariableDeclaration","scope":109,"src":"101:8:3","stateVariable":false,"storageLocation":"default","typeDescriptions":{"typeIdentifier":"t_uint16","typeString":"uint16"},"typeName":{"id":106,"name":"uint16","nodeType":"ElementaryTypeName","src":"101:6:3","typeDescriptions":{"typeIdentifier":"t_uint16","typeString":"uint16"}},"visibility":"internal"}],"src":"100:10:3"}},{"id":119,"nodeType":"EventDefinition","src":"173:129:3","nodes":[],"anonymous":false,"eventSelector":"2cb351db58390c313534745d80b5f0abff9230502a6374a97b9caa76b31c5d8a","name":"AnotherTrigger","nameLocation":"179:14:3","parameters":{"id":118,"nodeType":"ParameterList","parameters":[{"constant":false,"id":111,"indexed":true,"mutability":"mutable","name":"a","nameLocation":"219:1:3","nodeType":"VariableDeclaration","scope":119,"src":"203:17:3","stateVariable":false,"storageLocation":"default","typeDescriptions":{"typeIdentifier":"t_uint256","typeString":"uint256"},"typeName":{"id":110,"name":"uint256","nodeType":"ElementaryTypeName","src":"203:7:3","typeDescriptions":{"typeIdentifier":"t_uint256","typeString":"uint256"}},"visibility":"internal"},{"constant":false,"id":113,"indexed":true,"mutability":"mutable","name":"b","nameLocation":"246:1:3","nodeType":"VariableDeclaration","scope":119,"src":"230:17:3","stateVariable":false,"storageLocation":"default","typeDescriptions":{"typeIdentifier":"t_uint256","typeString":"uint256"},"typeName":{"id":112,"name":"uint256","nodeType":"ElementaryTypeName","src":"230:7:3","typeDescriptions":{"typeIdentifier":"t_uint256","typeString":"uint256"}},"visibility":"internal"},{"constant":false,"id":115,"indexed":true,"mutability":"mutable","name":"c","nameLocation":"273:1:3","nodeType":"VariableDeclaration","s
cope":119,"src":"257:17:3","stateVariable":false,"storageLocation":"default","typeDescriptions":{"typeIdentifier":"t_uint256","typeString":"uint256"},"typeName":{"id":114,"name":"uint256","nodeType":"ElementaryTypeName","src":"257:7:3","typeDescriptions":{"typeIdentifier":"t_uint256","typeString":"uint256"}},"visibility":"internal"},{"constant":false,"id":117,"indexed":false,"mutability":"mutable","name":"data","nameLocation":"291:4:3","nodeType":"VariableDeclaration","scope":119,"src":"284:11:3","stateVariable":false,"storageLocation":"default","typeDescriptions":{"typeIdentifier":"t_string_memory_ptr","typeString":"string"},"typeName":{"id":116,"name":"string","nodeType":"ElementaryTypeName","src":"284:6:3","typeDescriptions":{"typeIdentifier":"t_string_storage_ptr","typeString":"string"}},"visibility":"internal"}],"src":"193:108:3"}},{"id":127,"nodeType":"FunctionDefinition","src":"308:46:3","nodes":[],"body":{"id":126,"nodeType":"Block","src":"322:32:3","nodes":[],"statements":[{"eventCall":{"arguments":[{"hexValue":"30","id":123,"isConstant":false,"isLValue":false,"isPure":true,"kind":"number","lValueRequested":false,"nodeType":"Literal","src":"345:1:3","typeDescriptions":{"typeIdentifier":"t_rational_0_by_1","typeString":"int_const 0"},"value":"0"}],"expression":{"argumentTypes":[{"typeIdentifier":"t_rational_0_by_1","typeString":"int_const 0"}],"id":122,"name":"Trigger","nodeType":"Identifier","overloadedDeclarations":[],"referencedDeclaration":109,"src":"337:7:3","typeDescriptions":{"typeIdentifier":"t_function_event_nonpayable$_t_uint16_$returns$__$","typeString":"function 
(uint16)"}},"id":124,"isConstant":false,"isLValue":false,"isPure":false,"kind":"functionCall","lValueRequested":false,"nameLocations":[],"names":[],"nodeType":"FunctionCall","src":"337:10:3","tryCall":false,"typeDescriptions":{"typeIdentifier":"t_tuple$__$","typeString":"tuple()"}},"id":125,"nodeType":"EmitStatement","src":"332:15:3"}]},"implemented":true,"kind":"constructor","modifiers":[],"name":"","nameLocation":"-1:-1:-1","parameters":{"id":120,"nodeType":"ParameterList","parameters":[],"src":"319:2:3"},"returnParameters":{"id":121,"nodeType":"ParameterList","parameters":[],"src":"322:0:3"},"scope":157,"stateMutability":"nonpayable","virtual":false,"visibility":"public"},{"id":137,"nodeType":"FunctionDefinition","src":"360:70:3","nodes":[],"body":{"id":136,"nodeType":"Block","src":"398:32:3","nodes":[],"statements":[{"eventCall":{"arguments":[{"id":133,"name":"x","nodeType":"Identifier","overloadedDeclarations":[],"referencedDeclaration":129,"src":"421:1:3","typeDescriptions":{"typeIdentifier":"t_uint16","typeString":"uint16"}}],"expression":{"argumentTypes":[{"typeIdentifier":"t_uint16","typeString":"uint16"}],"id":132,"name":"Trigger","nodeType":"Identifier","overloadedDeclarations":[],"referencedDeclaration":109,"src":"413:7:3","typeDescriptions":{"typeIdentifier":"t_function_event_nonpayable$_t_uint16_$returns$__$","typeString":"function 
(uint16)"}},"id":134,"isConstant":false,"isLValue":false,"isPure":false,"kind":"functionCall","lValueRequested":false,"nameLocations":[],"names":[],"nodeType":"FunctionCall","src":"413:10:3","tryCall":false,"typeDescriptions":{"typeIdentifier":"t_tuple$__$","typeString":"tuple()"}},"id":135,"nodeType":"EmitStatement","src":"408:15:3"}]},"functionSelector":"16d04e0d","implemented":true,"kind":"function","modifiers":[],"name":"emitTrigger","nameLocation":"369:11:3","parameters":{"id":130,"nodeType":"ParameterList","parameters":[{"constant":false,"id":129,"mutability":"mutable","name":"x","nameLocation":"388:1:3","nodeType":"VariableDeclaration","scope":137,"src":"381:8:3","stateVariable":false,"storageLocation":"default","typeDescriptions":{"typeIdentifier":"t_uint16","typeString":"uint16"},"typeName":{"id":128,"name":"uint16","nodeType":"ElementaryTypeName","src":"381:6:3","typeDescriptions":{"typeIdentifier":"t_uint16","typeString":"uint16"}},"visibility":"internal"}],"src":"380:10:3"},"returnParameters":{"id":131,"nodeType":"ParameterList","parameters":[],"src":"398:0:3"},"scope":157,"stateMutability":"nonpayable","virtual":false,"visibility":"public"},{"id":156,"nodeType":"FunctionDefinition","src":"474:177:3","nodes":[],"body":{"id":155,"nodeType":"Block","src":"600:51:3","nodes":[],"statements":[{"eventCall":{"arguments":[{"id":149,"name":"a","nodeType":"Identifier","overloadedDeclarations":[],"referencedDeclaration":139,"src":"630:1:3","typeDescriptions":{"typeIdentifier":"t_uint256","typeString":"uint256"}},{"id":150,"name":"b","nodeType":"Identifier","overloadedDeclarations":[],"referencedDeclaration":141,"src":"633:1:3","typeDescriptions":{"typeIdentifier":"t_uint256","typeString":"uint256"}},{"id":151,"name":"c","nodeType":"Identifier","overloadedDeclarations":[],"referencedDeclaration":143,"src":"636:1:3","typeDescriptions":{"typeIdentifier":"t_uint256","typeString":"uint256"}},{"id":152,"name":"data","nodeType":"Identifier","overloadedDeclarations":[],"re
ferencedDeclaration":145,"src":"639:4:3","typeDescriptions":{"typeIdentifier":"t_string_memory_ptr","typeString":"string memory"}}],"expression":{"argumentTypes":[{"typeIdentifier":"t_uint256","typeString":"uint256"},{"typeIdentifier":"t_uint256","typeString":"uint256"},{"typeIdentifier":"t_uint256","typeString":"uint256"},{"typeIdentifier":"t_string_memory_ptr","typeString":"string memory"}],"id":148,"name":"AnotherTrigger","nodeType":"Identifier","overloadedDeclarations":[],"referencedDeclaration":119,"src":"615:14:3","typeDescriptions":{"typeIdentifier":"t_function_event_nonpayable$_t_uint256_$_t_uint256_$_t_uint256_$_t_string_memory_ptr_$returns$__$","typeString":"function (uint256,uint256,uint256,string memory)"}},"id":153,"isConstant":false,"isLValue":false,"isPure":false,"kind":"functionCall","lValueRequested":false,"nameLocations":[],"names":[],"nodeType":"FunctionCall","src":"615:29:3","tryCall":false,"typeDescriptions":{"typeIdentifier":"t_tuple$__$","typeString":"tuple()"}},"id":154,"nodeType":"EmitStatement","src":"610:34:3"}]},"functionSelector":"931919ea","implemented":true,"kind":"function","modifiers":[],"name":"emitAnotherTrigger","nameLocation":"483:18:3","parameters":{"id":146,"nodeType":"ParameterList","parameters":[{"constant":false,"id":139,"mutability":"mutable","name":"a","nameLocation":"519:1:3","nodeType":"VariableDeclaration","scope":156,"src":"511:9:3","stateVariable":false,"storageLocation":"default","typeDescriptions":{"typeIdentifier":"t_uint256","typeString":"uint256"},"typeName":{"id":138,"name":"uint256","nodeType":"ElementaryTypeName","src":"511:7:3","typeDescriptions":{"typeIdentifier":"t_uint256","typeString":"uint256"}},"visibility":"internal"},{"constant":false,"id":141,"mutability":"mutable","name":"b","nameLocation":"538:1:3","nodeType":"VariableDeclaration","scope":156,"src":"530:9:3","stateVariable":false,"storageLocation":"default","typeDescriptions":{"typeIdentifier":"t_uint256","typeString":"uint256"},"typeName":{"id":14
0,"name":"uint256","nodeType":"ElementaryTypeName","src":"530:7:3","typeDescriptions":{"typeIdentifier":"t_uint256","typeString":"uint256"}},"visibility":"internal"},{"constant":false,"id":143,"mutability":"mutable","name":"c","nameLocation":"557:1:3","nodeType":"VariableDeclaration","scope":156,"src":"549:9:3","stateVariable":false,"storageLocation":"default","typeDescriptions":{"typeIdentifier":"t_uint256","typeString":"uint256"},"typeName":{"id":142,"name":"uint256","nodeType":"ElementaryTypeName","src":"549:7:3","typeDescriptions":{"typeIdentifier":"t_uint256","typeString":"uint256"}},"visibility":"internal"},{"constant":false,"id":145,"mutability":"mutable","name":"data","nameLocation":"582:4:3","nodeType":"VariableDeclaration","scope":156,"src":"568:18:3","stateVariable":false,"storageLocation":"memory","typeDescriptions":{"typeIdentifier":"t_string_memory_ptr","typeString":"string"},"typeName":{"id":144,"name":"string","nodeType":"ElementaryTypeName","src":"568:6:3","typeDescriptions":{"typeIdentifier":"t_string_storage_ptr","typeString":"string"}},"visibility":"internal"}],"src":"501:91:3"},"returnParameters":{"id":147,"nodeType":"ParameterList","parameters":[],"src":"600:0:3"},"scope":157,"stateMutability":"nonpayable","virtual":false,"visibility":"public"}],"abstract":false,"baseContracts":[],"canonicalName":"SimpleContract","contractDependencies":[],"contractKind":"contract","fullyImplemented":true,"linearizedBaseContracts":[157],"name":"SimpleContract","nameLocation":"66:14:3","scope":158,"usedErrors":[]}],"license":"MIT"},"id":3} \ No newline at end of file diff --git a/tests/integration-tests/multiple-subgraph-datasources/package.json b/tests/integration-tests/multiple-subgraph-datasources/package.json new file mode 100644 index 00000000000..4f69662db8e --- /dev/null +++ b/tests/integration-tests/multiple-subgraph-datasources/package.json @@ -0,0 +1,25 @@ +{ + "name": "multiple-subgraph-datasources", + "version": "0.1.0", + "scripts": { + 
"build-contracts": "../../common/build-contracts.sh", + "codegen": "graph codegen subgraph.yaml --skip-migrations", + "test": "yarn build-contracts && truffle test --compile-none --network test", + "create:test": "graph create test/multiple-subgraph-datasources --node $GRAPH_NODE_ADMIN_URI", + "deploy:test": "graph deploy test/multiple-subgraph-datasources --version-label v0.0.1 --ipfs $IPFS_URI --node $GRAPH_NODE_ADMIN_URI" + }, + "devDependencies": { + "@graphprotocol/graph-cli": "0.93.4-alpha-20250105163501-f401d0c57c4ba1f1af95a928d447efd63a56ecdc", + "@graphprotocol/graph-ts": "0.36.0-alpha-20241129215038-b75cda9", + "solc": "^0.8.2" + }, + "dependencies": { + "@truffle/contract": "^4.3", + "@truffle/hdwallet-provider": "^1.2", + "apollo-fetch": "^0.7.0", + "babel-polyfill": "^6.26.0", + "babel-register": "^6.26.0", + "gluegun": "^4.6.1", + "truffle": "^5.2" + } +} \ No newline at end of file diff --git a/tests/integration-tests/multiple-subgraph-datasources/schema.graphql b/tests/integration-tests/multiple-subgraph-datasources/schema.graphql new file mode 100644 index 00000000000..569588477f6 --- /dev/null +++ b/tests/integration-tests/multiple-subgraph-datasources/schema.graphql @@ -0,0 +1,6 @@ +type AggregatedData @entity { + id: ID! + sourceA: String + sourceB: String + first: String! 
+} diff --git a/tests/integration-tests/multiple-subgraph-datasources/src/mapping.ts b/tests/integration-tests/multiple-subgraph-datasources/src/mapping.ts new file mode 100644 index 00000000000..649d92d3f5f --- /dev/null +++ b/tests/integration-tests/multiple-subgraph-datasources/src/mapping.ts @@ -0,0 +1,28 @@ +import { dataSource, EntityTrigger, log } from '@graphprotocol/graph-ts' +import { AggregatedData } from '../generated/schema' +import { SourceAData } from '../generated/subgraph-QmPWnNsD4m8T9EEF1ec5d8wetFxrMebggLj1efFHzdnZhx' +import { SourceBData } from '../generated/subgraph-Qma4Rk2D1w6mFiP15ZtHHx7eWkqFR426RWswreLiDanxej' + +export function handleSourceAData(data: EntityTrigger): void { + let aggregated = AggregatedData.load(data.data.id) + if (!aggregated) { + aggregated = new AggregatedData(data.data.id) + aggregated.sourceA = data.data.data + aggregated.first = 'sourceA' + } else { + aggregated.sourceA = data.data.data + } + aggregated.save() +} + +export function handleSourceBData(data: EntityTrigger): void { + let aggregated = AggregatedData.load(data.data.id) + if (!aggregated) { + aggregated = new AggregatedData(data.data.id) + aggregated.sourceB = data.data.data + aggregated.first = 'sourceB' + } else { + aggregated.sourceB = data.data.data + } + aggregated.save() +} diff --git a/tests/integration-tests/multiple-subgraph-datasources/subgraph.yaml b/tests/integration-tests/multiple-subgraph-datasources/subgraph.yaml new file mode 100644 index 00000000000..296777c578c --- /dev/null +++ b/tests/integration-tests/multiple-subgraph-datasources/subgraph.yaml @@ -0,0 +1,35 @@ +specVersion: 1.3.0 +schema: + file: ./schema.graphql +dataSources: + - kind: subgraph + name: SourceA + network: test + source: + address: 'QmPWnNsD4m8T9EEF1ec5d8wetFxrMebggLj1efFHzdnZhx' + startBlock: 0 + mapping: + apiVersion: 0.0.7 + language: wasm/assemblyscript + entities: + - AggregatedData + handlers: + - handler: handleSourceAData + entity: SourceAData + file: 
./src/mapping.ts + + - kind: subgraph + name: SourceB + network: test + source: + address: 'Qma4Rk2D1w6mFiP15ZtHHx7eWkqFR426RWswreLiDanxej' + startBlock: 0 + mapping: + apiVersion: 0.0.7 + language: wasm/assemblyscript + entities: + - AggregatedData + handlers: + - handler: handleSourceBData + entity: SourceBData + file: ./src/mapping.ts diff --git a/tests/integration-tests/source-subgraph-a/abis/Contract.abi b/tests/integration-tests/source-subgraph-a/abis/Contract.abi new file mode 100644 index 00000000000..02da1a9e7f3 --- /dev/null +++ b/tests/integration-tests/source-subgraph-a/abis/Contract.abi @@ -0,0 +1,33 @@ +[ + { + "inputs": [], + "stateMutability": "nonpayable", + "type": "constructor" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": false, + "internalType": "uint16", + "name": "x", + "type": "uint16" + } + ], + "name": "Trigger", + "type": "event" + }, + { + "inputs": [ + { + "internalType": "uint16", + "name": "x", + "type": "uint16" + } + ], + "name": "emitTrigger", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + } +] diff --git a/tests/integration-tests/source-subgraph-a/package.json b/tests/integration-tests/source-subgraph-a/package.json new file mode 100644 index 00000000000..ec656588fb2 --- /dev/null +++ b/tests/integration-tests/source-subgraph-a/package.json @@ -0,0 +1,25 @@ +{ + "name": "source-subgraph-a", + "version": "0.1.0", + "scripts": { + "build-contracts": "../../common/build-contracts.sh", + "codegen": "graph codegen --skip-migrations", + "test": "yarn build-contracts && truffle test --compile-none --network test", + "create:test": "graph create test/source-subgraph-a --node $GRAPH_NODE_ADMIN_URI", + "deploy:test": "graph deploy test/source-subgraph-a --version-label v0.0.1 --ipfs $IPFS_URI --node $GRAPH_NODE_ADMIN_URI" + }, + "devDependencies": { + "@graphprotocol/graph-cli": "0.69.0", + "@graphprotocol/graph-ts": "0.34.0", + "solc": "^0.8.2" + }, + "dependencies": { + "@truffle/contract": 
"^4.3", + "@truffle/hdwallet-provider": "^1.2", + "apollo-fetch": "^0.7.0", + "babel-polyfill": "^6.26.0", + "babel-register": "^6.26.0", + "gluegun": "^4.6.1", + "truffle": "^5.2" + } +} \ No newline at end of file diff --git a/tests/integration-tests/source-subgraph-a/schema.graphql b/tests/integration-tests/source-subgraph-a/schema.graphql new file mode 100644 index 00000000000..10be822d900 --- /dev/null +++ b/tests/integration-tests/source-subgraph-a/schema.graphql @@ -0,0 +1,5 @@ +type SourceAData @entity { + id: ID! + data: String! + blockNumber: BigInt! +} \ No newline at end of file diff --git a/tests/integration-tests/source-subgraph-a/src/mapping.ts b/tests/integration-tests/source-subgraph-a/src/mapping.ts new file mode 100644 index 00000000000..73e17986bf4 --- /dev/null +++ b/tests/integration-tests/source-subgraph-a/src/mapping.ts @@ -0,0 +1,9 @@ +import { ethereum } from '@graphprotocol/graph-ts' +import { SourceAData } from '../generated/schema' + +export function handleBlock(block: ethereum.Block): void { + let entity = new SourceAData(block.number.toString()) + entity.data = 'from source A' + entity.blockNumber = block.number + entity.save() +} \ No newline at end of file diff --git a/tests/integration-tests/source-subgraph-a/subgraph.yaml b/tests/integration-tests/source-subgraph-a/subgraph.yaml new file mode 100644 index 00000000000..8ac9b4a9290 --- /dev/null +++ b/tests/integration-tests/source-subgraph-a/subgraph.yaml @@ -0,0 +1,25 @@ +specVersion: 1.3.0 +description: Source Subgraph A +repository: https://github.com/graphprotocol/graph-node +schema: + file: ./schema.graphql +dataSources: + - kind: ethereum/contract + name: SimpleContract + network: test + source: + address: "0x5FbDB2315678afecb367f032d93F642f64180aa3" + abi: SimpleContract + startBlock: 0 + mapping: + kind: ethereum/events + apiVersion: 0.0.6 + language: wasm/assemblyscript + entities: + - SourceAData + abis: + - name: SimpleContract + file: ./abis/Contract.abi + 
blockHandlers: + - handler: handleBlock + file: ./src/mapping.ts \ No newline at end of file diff --git a/tests/integration-tests/source-subgraph-b/abis/Contract.abi b/tests/integration-tests/source-subgraph-b/abis/Contract.abi new file mode 100644 index 00000000000..02da1a9e7f3 --- /dev/null +++ b/tests/integration-tests/source-subgraph-b/abis/Contract.abi @@ -0,0 +1,33 @@ +[ + { + "inputs": [], + "stateMutability": "nonpayable", + "type": "constructor" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": false, + "internalType": "uint16", + "name": "x", + "type": "uint16" + } + ], + "name": "Trigger", + "type": "event" + }, + { + "inputs": [ + { + "internalType": "uint16", + "name": "x", + "type": "uint16" + } + ], + "name": "emitTrigger", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + } +] diff --git a/tests/integration-tests/source-subgraph-b/package.json b/tests/integration-tests/source-subgraph-b/package.json new file mode 100644 index 00000000000..d56e918adbb --- /dev/null +++ b/tests/integration-tests/source-subgraph-b/package.json @@ -0,0 +1,25 @@ +{ + "name": "source-subgraph-b", + "version": "0.1.0", + "scripts": { + "build-contracts": "../../common/build-contracts.sh", + "codegen": "graph codegen --skip-migrations", + "test": "yarn build-contracts && truffle test --compile-none --network test", + "create:test": "graph create test/source-subgraph-b --node $GRAPH_NODE_ADMIN_URI", + "deploy:test": "graph deploy test/source-subgraph-b --version-label v0.0.1 --ipfs $IPFS_URI --node $GRAPH_NODE_ADMIN_URI" + }, + "devDependencies": { + "@graphprotocol/graph-cli": "0.69.0", + "@graphprotocol/graph-ts": "0.34.0", + "solc": "^0.8.2" + }, + "dependencies": { + "@truffle/contract": "^4.3", + "@truffle/hdwallet-provider": "^1.2", + "apollo-fetch": "^0.7.0", + "babel-polyfill": "^6.26.0", + "babel-register": "^6.26.0", + "gluegun": "^4.6.1", + "truffle": "^5.2" + } +} \ No newline at end of file diff --git 
a/tests/integration-tests/source-subgraph-b/schema.graphql b/tests/integration-tests/source-subgraph-b/schema.graphql new file mode 100644 index 00000000000..9a84bdcbba3 --- /dev/null +++ b/tests/integration-tests/source-subgraph-b/schema.graphql @@ -0,0 +1,5 @@ +type SourceBData @entity { + id: ID! + data: String! + blockNumber: BigInt! +} \ No newline at end of file diff --git a/tests/integration-tests/source-subgraph-b/src/mapping.ts b/tests/integration-tests/source-subgraph-b/src/mapping.ts new file mode 100644 index 00000000000..19186b6caff --- /dev/null +++ b/tests/integration-tests/source-subgraph-b/src/mapping.ts @@ -0,0 +1,9 @@ +import { ethereum } from '@graphprotocol/graph-ts' +import { SourceBData } from '../generated/schema' + +export function handleBlock(block: ethereum.Block): void { + let entity = new SourceBData(block.number.toString()) + entity.data = 'from source B' + entity.blockNumber = block.number + entity.save() +} \ No newline at end of file diff --git a/tests/integration-tests/source-subgraph-b/subgraph.yaml b/tests/integration-tests/source-subgraph-b/subgraph.yaml new file mode 100644 index 00000000000..d8bae8e33fe --- /dev/null +++ b/tests/integration-tests/source-subgraph-b/subgraph.yaml @@ -0,0 +1,25 @@ +specVersion: 1.3.0 +description: Source Subgraph B +repository: https://github.com/graphprotocol/graph-node +schema: + file: ./schema.graphql +dataSources: + - kind: ethereum/contract + name: SimpleContract + network: test + source: + address: "0x5FbDB2315678afecb367f032d93F642f64180aa3" + abi: SimpleContract + startBlock: 0 + mapping: + kind: ethereum/events + apiVersion: 0.0.6 + language: wasm/assemblyscript + entities: + - SourceBData + abis: + - name: SimpleContract + file: ./abis/Contract.abi + blockHandlers: + - handler: handleBlock + file: ./src/mapping.ts \ No newline at end of file diff --git a/tests/integration-tests/source-subgraph/abis/Contract.abi b/tests/integration-tests/source-subgraph/abis/Contract.abi new file mode 
100644 index 00000000000..02da1a9e7f3 --- /dev/null +++ b/tests/integration-tests/source-subgraph/abis/Contract.abi @@ -0,0 +1,33 @@ +[ + { + "inputs": [], + "stateMutability": "nonpayable", + "type": "constructor" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": false, + "internalType": "uint16", + "name": "x", + "type": "uint16" + } + ], + "name": "Trigger", + "type": "event" + }, + { + "inputs": [ + { + "internalType": "uint16", + "name": "x", + "type": "uint16" + } + ], + "name": "emitTrigger", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + } +] diff --git a/tests/integration-tests/source-subgraph/package.json b/tests/integration-tests/source-subgraph/package.json new file mode 100644 index 00000000000..7bc7ab66c90 --- /dev/null +++ b/tests/integration-tests/source-subgraph/package.json @@ -0,0 +1,25 @@ +{ + "name": "source-subgraph", + "version": "0.1.0", + "scripts": { + "build-contracts": "../../common/build-contracts.sh", + "codegen": "graph codegen --skip-migrations", + "test": "yarn build-contracts && truffle test --compile-none --network test", + "create:test": "graph create test/source-subgraph --node $GRAPH_NODE_ADMIN_URI", + "deploy:test": "graph deploy test/source-subgraph --version-label v0.0.1 --ipfs $IPFS_URI --node $GRAPH_NODE_ADMIN_URI" + }, + "devDependencies": { + "@graphprotocol/graph-cli": "0.91.0-alpha-20241129215038-b75cda9", + "@graphprotocol/graph-ts": "0.36.0-alpha-20241129215038-b75cda9", + "solc": "^0.8.2" + }, + "dependencies": { + "@truffle/contract": "^4.3", + "@truffle/hdwallet-provider": "^1.2", + "apollo-fetch": "^0.7.0", + "babel-polyfill": "^6.26.0", + "babel-register": "^6.26.0", + "gluegun": "^4.6.1", + "truffle": "^5.2" + } +} diff --git a/tests/integration-tests/source-subgraph/schema.graphql b/tests/integration-tests/source-subgraph/schema.graphql new file mode 100644 index 00000000000..15bb2a33921 --- /dev/null +++ b/tests/integration-tests/source-subgraph/schema.graphql @@ -0,0 
+1,13 @@ +type Block @entity { + id: ID! + number: BigInt! + hash: Bytes! + testMessage: String +} + +type Block2 @entity { + id: ID! + number: BigInt! + hash: Bytes! + testMessage: String +} diff --git a/tests/integration-tests/source-subgraph/src/mapping.ts b/tests/integration-tests/source-subgraph/src/mapping.ts new file mode 100644 index 00000000000..ad27c43c2a3 --- /dev/null +++ b/tests/integration-tests/source-subgraph/src/mapping.ts @@ -0,0 +1,57 @@ +import { ethereum, log, store } from '@graphprotocol/graph-ts'; +import { Block, Block2 } from '../generated/schema'; +import { BigInt } from '@graphprotocol/graph-ts'; + +export function handleBlock(block: ethereum.Block): void { + log.info('handleBlock {}', [block.number.toString()]); + + let id = block.number.toString().concat('-v1'); + let blockEntity = new Block(id); + blockEntity.number = block.number; + blockEntity.hash = block.hash; + blockEntity.save(); + + let id2 = block.number.toString().concat('-v2'); + let blockEntity2 = new Block(id2); + blockEntity2.number = block.number; + blockEntity2.hash = block.hash; + blockEntity2.save(); + + let id3 = block.number.toString().concat('-v3'); + let blockEntity3 = new Block2(id3); + blockEntity3.number = block.number; + blockEntity3.hash = block.hash; + blockEntity3.save(); + + if (block.number.equals(BigInt.fromI32(1))) { + let id = 'TEST'; + let entity = new Block(id); + entity.number = block.number; + entity.hash = block.hash; + entity.testMessage = 'Created at block 1'; + log.info('Created entity at block 1', []); + entity.save(); + } + + if (block.number.equals(BigInt.fromI32(2))) { + let id = 'TEST'; + let blockEntity1 = Block.load(id); + if (blockEntity1) { + // Update the block entity + blockEntity1.testMessage = 'Updated at block 2'; + log.info('Updated entity at block 2', []); + blockEntity1.save(); + } + } + + if (block.number.equals(BigInt.fromI32(3))) { + let id = 'TEST'; + let blockEntity1 = Block.load(id); + if (blockEntity1) { + 
blockEntity1.testMessage = 'Deleted at block 3'; + log.info('Deleted entity at block 3', []); + blockEntity1.save(); + store.remove('Block', id); + } + } +} diff --git a/tests/integration-tests/source-subgraph/subgraph.yaml b/tests/integration-tests/source-subgraph/subgraph.yaml new file mode 100644 index 00000000000..22006e72dda --- /dev/null +++ b/tests/integration-tests/source-subgraph/subgraph.yaml @@ -0,0 +1,23 @@ +specVersion: 1.3.0 +schema: + file: ./schema.graphql +dataSources: + - kind: ethereum/contract + name: BlockHandlerTest + network: test + source: + address: "@SimpleContract@" + abi: Contract + startBlock: 1 + mapping: + kind: ethereum/events + apiVersion: 0.0.7 + language: wasm/assemblyscript + abis: + - name: Contract + file: ./abis/Contract.abi + entities: + - Call + blockHandlers: + - handler: handleBlock + file: ./src/mapping.ts \ No newline at end of file diff --git a/tests/integration-tests/subgraph-data-sources/abis/Contract.abi b/tests/integration-tests/subgraph-data-sources/abis/Contract.abi new file mode 100644 index 00000000000..9d9f56b9263 --- /dev/null +++ b/tests/integration-tests/subgraph-data-sources/abis/Contract.abi @@ -0,0 +1,15 @@ +[ + { + "anonymous": false, + "inputs": [ + { + "indexed": false, + "internalType": "string", + "name": "testCommand", + "type": "string" + } + ], + "name": "TestEvent", + "type": "event" + } +] diff --git a/tests/integration-tests/subgraph-data-sources/package.json b/tests/integration-tests/subgraph-data-sources/package.json new file mode 100644 index 00000000000..d1edefb7b3a --- /dev/null +++ b/tests/integration-tests/subgraph-data-sources/package.json @@ -0,0 +1,13 @@ +{ + "name": "subgraph-data-sources", + "version": "0.1.0", + "scripts": { + "codegen": "graph codegen --skip-migrations", + "create:test": "graph create test/subgraph-data-sources --node $GRAPH_NODE_ADMIN_URI", + "deploy:test": "graph deploy test/subgraph-data-sources --version-label v0.0.1 --ipfs $IPFS_URI --node 
$GRAPH_NODE_ADMIN_URI" + }, + "devDependencies": { + "@graphprotocol/graph-cli": "0.93.4-alpha-20250105163501-f401d0c57c4ba1f1af95a928d447efd63a56ecdc", + "@graphprotocol/graph-ts": "0.36.0-alpha-20241129215038-b75cda9" + } +} diff --git a/tests/integration-tests/subgraph-data-sources/schema.graphql b/tests/integration-tests/subgraph-data-sources/schema.graphql new file mode 100644 index 00000000000..18c8153f8fd --- /dev/null +++ b/tests/integration-tests/subgraph-data-sources/schema.graphql @@ -0,0 +1,6 @@ +type MirrorBlock @entity { + id: String! + number: BigInt! + hash: Bytes! + testMessage: String +} diff --git a/tests/integration-tests/subgraph-data-sources/src/mapping.ts b/tests/integration-tests/subgraph-data-sources/src/mapping.ts new file mode 100644 index 00000000000..45ecbd41076 --- /dev/null +++ b/tests/integration-tests/subgraph-data-sources/src/mapping.ts @@ -0,0 +1,33 @@ +import { Entity, log, store, BigInt, EntityTrigger, EntityOp } from '@graphprotocol/graph-ts'; +import { Block } from '../generated/subgraph-QmVz1Pt7NhgCkz4gfavmNrMhojnMT9hW81QDqVjy56ZMUP'; +import { MirrorBlock } from '../generated/schema'; + +export function handleEntity(trigger: EntityTrigger): void { + let blockEntity = trigger.data; + let id = blockEntity.id; + + if (trigger.operation === EntityOp.Remove) { + log.info('Removing block entity with id: {}', [id]); + store.remove('MirrorBlock', id); + return; + } + + let block = loadOrCreateMirrorBlock(id); + block.number = blockEntity.number; + block.hash = blockEntity.hash; + + if (blockEntity.testMessage) { + block.testMessage = blockEntity.testMessage; + } + + block.save(); +} + +export function loadOrCreateMirrorBlock(id: string): MirrorBlock { + let block = MirrorBlock.load(id); + if (!block) { + log.info('Creating new block entity with id: {}', [id]); + block = new MirrorBlock(id); + } + return block; +} diff --git a/tests/integration-tests/subgraph-data-sources/subgraph.yaml 
b/tests/integration-tests/subgraph-data-sources/subgraph.yaml new file mode 100644 index 00000000000..3fdc76ac089 --- /dev/null +++ b/tests/integration-tests/subgraph-data-sources/subgraph.yaml @@ -0,0 +1,21 @@ +specVersion: 1.3.0 +schema: + file: ./schema.graphql +dataSources: + - kind: subgraph + name: Contract + network: test + source: + address: 'QmVz1Pt7NhgCkz4gfavmNrMhojnMT9hW81QDqVjy56ZMUP' + startBlock: 0 + mapping: + apiVersion: 0.0.7 + language: wasm/assemblyscript + entities: + - Gravatar + handlers: + - handler: handleEntity + entity: Block + - handler: handleEntity + entity: Block2 + file: ./src/mapping.ts diff --git a/tests/integration-tests/yarn.lock b/tests/integration-tests/yarn.lock index f81274832bf..4f7bab674d5 100644 --- a/tests/integration-tests/yarn.lock +++ b/tests/integration-tests/yarn.lock @@ -209,6 +209,18 @@ "@babel/helper-validator-identifier" "^7.22.20" to-fast-properties "^2.0.0" +"@chainsafe/is-ip@^2.0.1": + version "2.0.2" + resolved "https://registry.yarnpkg.com/@chainsafe/is-ip/-/is-ip-2.0.2.tgz#7311e7403f11d8c5cfa48111f56fcecaac37c9f6" + integrity sha512-ndGqEMG1W5WkGagaqOZHpPU172AGdxr+LD15sv3WIUvT5oCFUrG1Y0CW/v2Egwj4JXEvSibaIIIqImsm98y1nA== + +"@chainsafe/netmask@^2.0.0": + version "2.0.0" + resolved "https://registry.yarnpkg.com/@chainsafe/netmask/-/netmask-2.0.0.tgz#0d4a75f47919f65011da4327a3845c9661f1038a" + integrity sha512-I3Z+6SWUoaljh3TBzCnCxjlUyN8tA+NAk5L6m9IxvCf1BENQTePzPMis97CoN/iMW1St3WN+AWCCRp+TTBRiDg== + dependencies: + "@chainsafe/is-ip" "^2.0.1" + "@cspotcode/source-map-support@^0.8.0": version "0.8.1" resolved "https://registry.yarnpkg.com/@cspotcode/source-map-support/-/source-map-support-0.8.1.tgz#00629c35a688e05a88b1cda684fb9d5e73f000a1" @@ -662,7 +674,7 @@ "@ethersproject/properties" "^5.7.0" "@ethersproject/strings" "^5.7.0" -"@float-capital/float-subgraph-uncrashable@^0.0.0-alpha.4": +"@float-capital/float-subgraph-uncrashable@0.0.0-internal-testing.5", 
"@float-capital/float-subgraph-uncrashable@^0.0.0-alpha.4": version "0.0.0-internal-testing.5" resolved "https://registry.yarnpkg.com/@float-capital/float-subgraph-uncrashable/-/float-subgraph-uncrashable-0.0.0-internal-testing.5.tgz#060f98440f6e410812766c5b040952d2d02e2b73" integrity sha512-yZ0H5e3EpAYKokX/AbtplzlvSxEJY7ZfpvQyDzyODkks0hakAAlDG6fQu1SlDJMWorY7bbq1j7fCiFeTWci6TA== @@ -738,6 +750,74 @@ which "2.0.2" yaml "1.10.2" +"@graphprotocol/graph-cli@0.91.0-alpha-20241129215038-b75cda9": + version "0.91.0-alpha-20241129215038-b75cda9" + resolved "https://registry.yarnpkg.com/@graphprotocol/graph-cli/-/graph-cli-0.91.0-alpha-20241129215038-b75cda9.tgz#8d2bc63db5723bcd857b9473ba1b2e7576cd85fd" + integrity sha512-LpfQPjOkCOquTeWqeeC9MJr4eTyKspl2g8u/K8S8qe3SKzMmuHcwQfq/dgBxCbs3m+4vrDYJgDUcQNJ6W5afyw== + dependencies: + "@float-capital/float-subgraph-uncrashable" "^0.0.0-alpha.4" + "@oclif/core" "2.8.6" + "@oclif/plugin-autocomplete" "^2.3.6" + "@oclif/plugin-not-found" "^2.4.0" + "@oclif/plugin-warn-if-update-available" "^3.1.20" + "@whatwg-node/fetch" "^0.8.4" + assemblyscript "0.19.23" + binary-install-raw "0.0.13" + chalk "3.0.0" + chokidar "3.5.3" + debug "4.3.4" + docker-compose "0.23.19" + dockerode "2.5.8" + fs-extra "9.1.0" + glob "9.3.5" + gluegun "5.1.6" + graphql "15.5.0" + immutable "4.2.1" + ipfs-http-client "55.0.0" + jayson "4.0.0" + js-yaml "3.14.1" + open "8.4.2" + prettier "3.0.3" + semver "7.4.0" + sync-request "6.1.0" + tmp-promise "3.0.3" + web3-eth-abi "1.7.0" + which "2.0.2" + yaml "1.10.2" + +"@graphprotocol/graph-cli@0.93.4-alpha-20250105163501-f401d0c57c4ba1f1af95a928d447efd63a56ecdc": + version "0.93.4-alpha-20250105163501-f401d0c57c4ba1f1af95a928d447efd63a56ecdc" + resolved "https://registry.yarnpkg.com/@graphprotocol/graph-cli/-/graph-cli-0.93.4-alpha-20250105163501-f401d0c57c4ba1f1af95a928d447efd63a56ecdc.tgz#8eeab2432d69cc5d03f3a9f462b67cae73ffa7ef" + integrity 
sha512-+pleAuy1422Q26KCNjMd+DJvjazEb3rSRTM+Y0cRwdMJtl2qcDAXUcg9E/9z+tpCFxx61ujf7T3z04x8Tlq+Lg== + dependencies: + "@float-capital/float-subgraph-uncrashable" "0.0.0-internal-testing.5" + "@oclif/core" "4.0.34" + "@oclif/plugin-autocomplete" "^3.2.11" + "@oclif/plugin-not-found" "^3.2.29" + "@oclif/plugin-warn-if-update-available" "^3.1.24" + "@pinax/graph-networks-registry" "^0.6.5" + "@whatwg-node/fetch" "^0.10.1" + assemblyscript "0.19.23" + binary-install "^1.1.0" + chokidar "4.0.1" + debug "4.3.7" + docker-compose "1.1.0" + fs-extra "11.2.0" + glob "11.0.0" + gluegun "5.2.0" + graphql "16.9.0" + immutable "5.0.3" + jayson "4.1.3" + js-yaml "4.1.0" + kubo-rpc-client "^5.0.2" + open "10.1.0" + prettier "3.4.2" + semver "7.6.3" + tmp-promise "3.0.3" + undici "7.1.1" + web3-eth-abi "4.4.1" + yaml "2.6.1" + "@graphprotocol/graph-ts@0.34.0": version "0.34.0" resolved "https://registry.yarnpkg.com/@graphprotocol/graph-ts/-/graph-ts-0.34.0.tgz#ca47398295b114f25b412faa364b98af31fa2bb7" @@ -759,6 +839,13 @@ dependencies: assemblyscript "0.19.10" +"@graphprotocol/graph-ts@0.36.0-alpha-20241129215038-b75cda9": + version "0.36.0-alpha-20241129215038-b75cda9" + resolved "https://registry.yarnpkg.com/@graphprotocol/graph-ts/-/graph-ts-0.36.0-alpha-20241129215038-b75cda9.tgz#1e9d3fd9294114f33dc382dfdf5ce00caabaeb22" + integrity sha512-DPLx/owGh38n6HCQaxO6rk40zfYw3EYqSvyHp+s3ClMCxQET9x4/hberkOXrPaxxiPxgUTVa6ie4mwc7GTroEw== + dependencies: + assemblyscript "0.19.10" + "@graphql-tools/batch-execute@8.5.1": version "8.5.1" resolved "https://registry.yarnpkg.com/@graphql-tools/batch-execute/-/batch-execute-8.5.1.tgz#fa3321d58c64041650be44250b1ebc3aab0ba7a9" @@ -847,6 +934,139 @@ resolved "https://registry.yarnpkg.com/@graphql-typed-document-node/core/-/core-3.2.0.tgz#5f3d96ec6b2354ad6d8a28bf216a1d97b5426861" integrity sha512-mB9oAsNCm9aM3/SOv4YtBMqZbYj10R7dkq8byBqxGY/ncFwhf2oQzMV+LCRlWoDSEBJ3COiR1yeDvMtsoOsuFQ== +"@inquirer/checkbox@^4.0.4": + version "4.0.4" + resolved 
"https://registry.yarnpkg.com/@inquirer/checkbox/-/checkbox-4.0.4.tgz#e7335f9c23f4100f789a8fceb26417c9a74a6dee" + integrity sha512-fYAKCAcGNMdfjL6hZTRUwkIByQ8EIZCXKrIQZH7XjADnN/xvRUhj8UdBbpC4zoUzvChhkSC/zRKaP/tDs3dZpg== + dependencies: + "@inquirer/core" "^10.1.2" + "@inquirer/figures" "^1.0.9" + "@inquirer/type" "^3.0.2" + ansi-escapes "^4.3.2" + yoctocolors-cjs "^2.1.2" + +"@inquirer/confirm@^5.1.1": + version "5.1.1" + resolved "https://registry.yarnpkg.com/@inquirer/confirm/-/confirm-5.1.1.tgz#18385064b8275eb79fdba505ce527801804eea04" + integrity sha512-vVLSbGci+IKQvDOtzpPTCOiEJCNidHcAq9JYVoWTW0svb5FiwSLotkM+JXNXejfjnzVYV9n0DTBythl9+XgTxg== + dependencies: + "@inquirer/core" "^10.1.2" + "@inquirer/type" "^3.0.2" + +"@inquirer/core@^10.1.2": + version "10.1.2" + resolved "https://registry.yarnpkg.com/@inquirer/core/-/core-10.1.2.tgz#a9c5b9ed814a636e99b5c0a8ca4f1626d99fd75d" + integrity sha512-bHd96F3ezHg1mf/J0Rb4CV8ndCN0v28kUlrHqP7+ECm1C/A+paB7Xh2lbMk6x+kweQC+rZOxM/YeKikzxco8bQ== + dependencies: + "@inquirer/figures" "^1.0.9" + "@inquirer/type" "^3.0.2" + ansi-escapes "^4.3.2" + cli-width "^4.1.0" + mute-stream "^2.0.0" + signal-exit "^4.1.0" + strip-ansi "^6.0.1" + wrap-ansi "^6.2.0" + yoctocolors-cjs "^2.1.2" + +"@inquirer/editor@^4.2.1": + version "4.2.1" + resolved "https://registry.yarnpkg.com/@inquirer/editor/-/editor-4.2.1.tgz#9887e95aa28a52eb20e9e08d85cb3698ef404601" + integrity sha512-xn9aDaiP6nFa432i68JCaL302FyL6y/6EG97nAtfIPnWZ+mWPgCMLGc4XZ2QQMsZtu9q3Jd5AzBPjXh10aX9kA== + dependencies: + "@inquirer/core" "^10.1.2" + "@inquirer/type" "^3.0.2" + external-editor "^3.1.0" + +"@inquirer/expand@^4.0.4": + version "4.0.4" + resolved "https://registry.yarnpkg.com/@inquirer/expand/-/expand-4.0.4.tgz#e3b052835e48fd4ebcf71813b7eae8b03c729d1b" + integrity sha512-GYocr+BPyxKPxQ4UZyNMqZFSGKScSUc0Vk17II3J+0bDcgGsQm0KYQNooN1Q5iBfXsy3x/VWmHGh20QnzsaHwg== + dependencies: + "@inquirer/core" "^10.1.2" + "@inquirer/type" "^3.0.2" + yoctocolors-cjs "^2.1.2" + 
+"@inquirer/figures@^1.0.9": + version "1.0.9" + resolved "https://registry.yarnpkg.com/@inquirer/figures/-/figures-1.0.9.tgz#9d8128f8274cde4ca009ca8547337cab3f37a4a3" + integrity sha512-BXvGj0ehzrngHTPTDqUoDT3NXL8U0RxUk2zJm2A66RhCEIWdtU1v6GuUqNAgArW4PQ9CinqIWyHdQgdwOj06zQ== + +"@inquirer/input@^4.1.1": + version "4.1.1" + resolved "https://registry.yarnpkg.com/@inquirer/input/-/input-4.1.1.tgz#aea2e463087c6aae57b9801e1ae5648f50d0d22e" + integrity sha512-nAXAHQndZcXB+7CyjIW3XuQZZHbQQ0q8LX6miY6bqAWwDzNa9JUioDBYrFmOUNIsuF08o1WT/m2gbBXvBhYVxg== + dependencies: + "@inquirer/core" "^10.1.2" + "@inquirer/type" "^3.0.2" + +"@inquirer/number@^3.0.4": + version "3.0.4" + resolved "https://registry.yarnpkg.com/@inquirer/number/-/number-3.0.4.tgz#090dcac6886d0cddc255f6624b61fb4461747fee" + integrity sha512-DX7a6IXRPU0j8kr2ovf+QaaDiIf+zEKaZVzCWdLOTk7XigqSXvoh4cul7x68xp54WTQrgSnW7P1WBJDbyY3GhA== + dependencies: + "@inquirer/core" "^10.1.2" + "@inquirer/type" "^3.0.2" + +"@inquirer/password@^4.0.4": + version "4.0.4" + resolved "https://registry.yarnpkg.com/@inquirer/password/-/password-4.0.4.tgz#77891ae3ed5736607e6e942993ac40ca00411a2c" + integrity sha512-wiliQOWdjM8FnBmdIHtQV2Ca3S1+tMBUerhyjkRCv1g+4jSvEweGu9GCcvVEgKDhTBT15nrxvk5/bVrGUqSs1w== + dependencies: + "@inquirer/core" "^10.1.2" + "@inquirer/type" "^3.0.2" + ansi-escapes "^4.3.2" + +"@inquirer/prompts@^7.2.1": + version "7.2.1" + resolved "https://registry.yarnpkg.com/@inquirer/prompts/-/prompts-7.2.1.tgz#f00fbcf06998a07faebc10741efa289384529950" + integrity sha512-v2JSGri6/HXSfoGIwuKEn8sNCQK6nsB2BNpy2lSX6QH9bsECrMv93QHnj5+f+1ZWpF/VNioIV2B/PDox8EvGuQ== + dependencies: + "@inquirer/checkbox" "^4.0.4" + "@inquirer/confirm" "^5.1.1" + "@inquirer/editor" "^4.2.1" + "@inquirer/expand" "^4.0.4" + "@inquirer/input" "^4.1.1" + "@inquirer/number" "^3.0.4" + "@inquirer/password" "^4.0.4" + "@inquirer/rawlist" "^4.0.4" + "@inquirer/search" "^3.0.4" + "@inquirer/select" "^4.0.4" + +"@inquirer/rawlist@^4.0.4": + version "4.0.4" + 
resolved "https://registry.yarnpkg.com/@inquirer/rawlist/-/rawlist-4.0.4.tgz#d10bbd6c529cd468d3d764c19de21334a01fa6d9" + integrity sha512-IsVN2EZdNHsmFdKWx9HaXb8T/s3FlR/U1QPt9dwbSyPtjFbMTlW9CRFvnn0bm/QIsrMRD2oMZqrQpSWPQVbXXg== + dependencies: + "@inquirer/core" "^10.1.2" + "@inquirer/type" "^3.0.2" + yoctocolors-cjs "^2.1.2" + +"@inquirer/search@^3.0.4": + version "3.0.4" + resolved "https://registry.yarnpkg.com/@inquirer/search/-/search-3.0.4.tgz#fcf51a853536add37491920634a182ecc9f5524b" + integrity sha512-tSkJk2SDmC2MEdTIjknXWmCnmPr5owTs9/xjfa14ol1Oh95n6xW7SYn5fiPk4/vrJPys0ggSWiISdPze4LTa7A== + dependencies: + "@inquirer/core" "^10.1.2" + "@inquirer/figures" "^1.0.9" + "@inquirer/type" "^3.0.2" + yoctocolors-cjs "^2.1.2" + +"@inquirer/select@^4.0.4": + version "4.0.4" + resolved "https://registry.yarnpkg.com/@inquirer/select/-/select-4.0.4.tgz#026ada15754def1cd3fbc01efc56eae45ccc7de4" + integrity sha512-ZzYLuLoUzTIW9EJm++jBpRiTshGqS3Q1o5qOEQqgzaBlmdsjQr6pA4TUNkwu6OBYgM2mIRbCz6mUhFDfl/GF+w== + dependencies: + "@inquirer/core" "^10.1.2" + "@inquirer/figures" "^1.0.9" + "@inquirer/type" "^3.0.2" + ansi-escapes "^4.3.2" + yoctocolors-cjs "^2.1.2" + +"@inquirer/type@^3.0.2": + version "3.0.2" + resolved "https://registry.yarnpkg.com/@inquirer/type/-/type-3.0.2.tgz#baff9f8d70947181deb36772cd9a5b6876d3e60c" + integrity sha512-ZhQ4TvhwHZF+lGhQ2O/rsjo80XoZR5/5qhOY3t6FJuX5XBg5Be8YzYTvaUGJnc12AUGI2nr4QSUE4PhKSigx7g== + "@ipld/dag-cbor@^7.0.0": version "7.0.3" resolved "https://registry.yarnpkg.com/@ipld/dag-cbor/-/dag-cbor-7.0.3.tgz#aa31b28afb11a807c3d627828a344e5521ac4a1e" @@ -855,6 +1075,22 @@ cborg "^1.6.0" multiformats "^9.5.4" +"@ipld/dag-cbor@^9.0.0": + version "9.2.2" + resolved "https://registry.yarnpkg.com/@ipld/dag-cbor/-/dag-cbor-9.2.2.tgz#e6f5f5bd1e4f290f2285b51fc969ef806484603a" + integrity sha512-uIEOuruCqKTP50OBWwgz4Js2+LhiBQaxc57cnP71f45b1mHEAo1OCR1Zn/TbvSW/mV1x+JqhacIktkKyaYqhCw== + dependencies: + cborg "^4.0.0" + multiformats "^13.1.0" + 
+"@ipld/dag-json@^10.0.0": + version "10.2.3" + resolved "https://registry.yarnpkg.com/@ipld/dag-json/-/dag-json-10.2.3.tgz#bb9de2e869f1c523104c52adc89e1e8bb0db7253" + integrity sha512-itacv1j1hvYgLox2B42Msn70QLzcr0MEo5yGIENuw2SM/lQzq9bmBiMky+kDsIrsqqblKTXcHBZnnmK7D4a6ZQ== + dependencies: + cborg "^4.0.0" + multiformats "^13.1.0" + "@ipld/dag-json@^8.0.1": version "8.0.11" resolved "https://registry.yarnpkg.com/@ipld/dag-json/-/dag-json-8.0.11.tgz#8d30cc2dfacb0aef04d327465d3df91e79e8b6ce" @@ -870,6 +1106,25 @@ dependencies: multiformats "^9.5.4" +"@ipld/dag-pb@^4.0.0": + version "4.1.3" + resolved "https://registry.yarnpkg.com/@ipld/dag-pb/-/dag-pb-4.1.3.tgz#b572d7978fa548a3a9219f566a80884189261858" + integrity sha512-ueULCaaSCcD+dQga6nKiRr+RSeVgdiYiEPKVUu5iQMNYDN+9osd0KpR3UDd9uQQ+6RWuv9L34SchfEwj7YIbOA== + dependencies: + multiformats "^13.1.0" + +"@isaacs/cliui@^8.0.2": + version "8.0.2" + resolved "https://registry.yarnpkg.com/@isaacs/cliui/-/cliui-8.0.2.tgz#b37667b7bc181c168782259bab42474fbf52b550" + integrity sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA== + dependencies: + string-width "^5.1.2" + string-width-cjs "npm:string-width@^4.2.0" + strip-ansi "^7.0.1" + strip-ansi-cjs "npm:strip-ansi@^6.0.1" + wrap-ansi "^8.1.0" + wrap-ansi-cjs "npm:wrap-ansi@^7.0.0" + "@josephg/resolvable@^1.0.0": version "1.0.1" resolved "https://registry.yarnpkg.com/@josephg/resolvable/-/resolvable-1.0.1.tgz#69bc4db754d79e1a2f17a650d3466e038d94a5eb" @@ -893,6 +1148,95 @@ "@jridgewell/resolve-uri" "^3.0.3" "@jridgewell/sourcemap-codec" "^1.4.10" +"@kamilkisiela/fast-url-parser@^1.1.4": + version "1.1.4" + resolved "https://registry.yarnpkg.com/@kamilkisiela/fast-url-parser/-/fast-url-parser-1.1.4.tgz#9d68877a489107411b953c54ea65d0658b515809" + integrity sha512-gbkePEBupNydxCelHCESvFSFM8XPh1Zs/OAVRW/rKpEqPAl5PbOM90Si8mv9bvnR53uPD2s/FiRxdvSejpRJew== + +"@leichtgewicht/ip-codec@^2.0.1": + version "2.0.5" + resolved 
"https://registry.yarnpkg.com/@leichtgewicht/ip-codec/-/ip-codec-2.0.5.tgz#4fc56c15c580b9adb7dc3c333a134e540b44bfb1" + integrity sha512-Vo+PSpZG2/fmgmiNzYK9qWRh8h/CHrwD0mo1h1DzL4yzHNSfWYujGTYsWGreD000gcgmZ7K4Ys6Tx9TxtsKdDw== + +"@libp2p/crypto@^5.0.0", "@libp2p/crypto@^5.0.8": + version "5.0.8" + resolved "https://registry.yarnpkg.com/@libp2p/crypto/-/crypto-5.0.8.tgz#e55236265fa5c5c07196eed595985b6218353fca" + integrity sha512-3ZxuzqMvyLXhRnjT3sjvzCCW4zkO9UKgv75KfqExP3k1Yk/Zbb+oM2z7OgnDycvLGxnRZgGwizrgnWpZvXlDEA== + dependencies: + "@libp2p/interface" "^2.3.0" + "@noble/curves" "^1.7.0" + "@noble/hashes" "^1.6.1" + asn1js "^3.0.5" + multiformats "^13.3.1" + protons-runtime "^5.5.0" + uint8arraylist "^2.4.8" + uint8arrays "^5.1.0" + +"@libp2p/interface@^2.0.0", "@libp2p/interface@^2.3.0": + version "2.3.0" + resolved "https://registry.yarnpkg.com/@libp2p/interface/-/interface-2.3.0.tgz#778638152634ad34c53d31f242a97bd139689273" + integrity sha512-lodc8jxw32fkY2m2bsS6yzzozua6EDr5rJvahJaJVC36jZWFW5sBmOW8jBoKfoZyRwgD6uoOXP39miWQhEaUcg== + dependencies: + "@multiformats/multiaddr" "^12.3.3" + it-pushable "^3.2.3" + it-stream-types "^2.0.2" + multiformats "^13.3.1" + progress-events "^1.0.1" + uint8arraylist "^2.4.8" + +"@libp2p/logger@^5.0.0": + version "5.1.5" + resolved "https://registry.yarnpkg.com/@libp2p/logger/-/logger-5.1.5.tgz#428eb626d41e5e01ff4c5ca2f5b7a1cd161402e4" + integrity sha512-Qe8B/Mja0myaArPvuI5iKVi3o2Z55Rir+RDkkEU/m9TkKDkHVFmGKnPlWDzHehi18GALjLxOsTE9TJASxjDTCA== + dependencies: + "@libp2p/interface" "^2.3.0" + "@multiformats/multiaddr" "^12.3.3" + interface-datastore "^8.3.1" + multiformats "^13.3.1" + weald "^1.0.4" + +"@libp2p/peer-id@^5.0.0": + version "5.0.9" + resolved "https://registry.yarnpkg.com/@libp2p/peer-id/-/peer-id-5.0.9.tgz#48424ae8f873cab4c60bca59143df047e3b3a388" + integrity sha512-TgWOPbU7AcUdSiHomL2wcg9eJqjoMCvCmU5eq/3fyBygTaG4BiQA/tYKuTEfeB5YPMdG1cJLmxgpk/a+ZRkY1g== + dependencies: + "@libp2p/crypto" "^5.0.8" + "@libp2p/interface" 
"^2.3.0" + multiformats "^13.3.1" + uint8arrays "^5.1.0" + +"@multiformats/dns@^1.0.3": + version "1.0.6" + resolved "https://registry.yarnpkg.com/@multiformats/dns/-/dns-1.0.6.tgz#b8c7de11459a02a5f4e609d35d3cdb95cb6ad152" + integrity sha512-nt/5UqjMPtyvkG9BQYdJ4GfLK3nMqGpFZOzf4hAmIa0sJh2LlS9YKXZ4FgwBDsaHvzZqR/rUFIywIc7pkHNNuw== + dependencies: + "@types/dns-packet" "^5.6.5" + buffer "^6.0.3" + dns-packet "^5.6.1" + hashlru "^2.3.0" + p-queue "^8.0.1" + progress-events "^1.0.0" + uint8arrays "^5.0.2" + +"@multiformats/multiaddr-to-uri@^10.0.1": + version "10.1.2" + resolved "https://registry.yarnpkg.com/@multiformats/multiaddr-to-uri/-/multiaddr-to-uri-10.1.2.tgz#63271c4aaf5e9e275f3a48aeb8282435e938c1b0" + integrity sha512-6sicfYRjJlHJn4bwsQancs8kXncWU4dDN/+V9sMVTYp9hi8ovWgVkK75AbAv4SfhztmmI+oufVUncQ1n+SukKQ== + dependencies: + "@multiformats/multiaddr" "^12.3.0" + +"@multiformats/multiaddr@^12.2.1", "@multiformats/multiaddr@^12.3.0", "@multiformats/multiaddr@^12.3.3": + version "12.3.4" + resolved "https://registry.yarnpkg.com/@multiformats/multiaddr/-/multiaddr-12.3.4.tgz#3dd3d7d76f95ce9c8768770e8008a99de9b7ba49" + integrity sha512-R4pEEUyWGrRo16TSflz80Yr6XNbPirix1pfPqDLXsDZ4aaIrhZ7cez9jnyRQgci6DuuqSyZAdJKV6SdxpZ7Oiw== + dependencies: + "@chainsafe/is-ip" "^2.0.1" + "@chainsafe/netmask" "^2.0.0" + "@multiformats/dns" "^1.0.3" + multiformats "^13.0.0" + uint8-varint "^2.0.1" + uint8arrays "^5.0.0" + "@noble/curves@1.3.0", "@noble/curves@~1.3.0": version "1.3.0" resolved "https://registry.yarnpkg.com/@noble/curves/-/curves-1.3.0.tgz#01be46da4fd195822dab821e72f71bf4aeec635e" @@ -900,11 +1244,23 @@ dependencies: "@noble/hashes" "1.3.3" +"@noble/curves@^1.7.0": + version "1.8.0" + resolved "https://registry.yarnpkg.com/@noble/curves/-/curves-1.8.0.tgz#fe035a23959e6aeadf695851b51a87465b5ba8f7" + integrity sha512-j84kjAbzEnQHaSIhRPUmB3/eVXu2k3dKPl2LOrR8fSOIL+89U+7lV117EWHtq/GHM3ReGHM46iRBdZfpc4HRUQ== + dependencies: + "@noble/hashes" "1.7.0" + "@noble/hashes@1.3.3", 
"@noble/hashes@~1.3.2": version "1.3.3" resolved "https://registry.yarnpkg.com/@noble/hashes/-/hashes-1.3.3.tgz#39908da56a4adc270147bb07968bf3b16cfe1699" integrity sha512-V7/fPHgl+jsVPXqqeOzT8egNj2iBIVt+ECeMMG8TdcnTikP3oaBtUVqpT/gYCR68aEBJSF+XbYUxStjbFMqIIA== +"@noble/hashes@1.7.0", "@noble/hashes@^1.6.1": + version "1.7.0" + resolved "https://registry.yarnpkg.com/@noble/hashes/-/hashes-1.7.0.tgz#5d9e33af2c7d04fee35de1519b80c958b2e35e39" + integrity sha512-HXydb0DgzTpDPwbVeDGCG1gIu7X6+AuU6Zl6av/E/KG8LMsvPntvq+w17CHRpKBmN6Ybdrt1eP3k4cj8DJa78w== + "@nodelib/fs.scandir@2.1.5": version "2.1.5" resolved "https://registry.yarnpkg.com/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz#7619c2eb21b25483f6d167548b4cfd5a7488c3d5" @@ -961,6 +1317,30 @@ wordwrap "^1.0.0" wrap-ansi "^7.0.0" +"@oclif/core@4.0.34": + version "4.0.34" + resolved "https://registry.yarnpkg.com/@oclif/core/-/core-4.0.34.tgz#2a1d10e6383383cae5fb81662d68147cc6a0dcef" + integrity sha512-jHww7lIqyifamynDSjDNNjNOwFTQdKYeOSYaxUaoWhqXnRwacZ+pfUN4Y0L9lqSN4MQtlWM9mwnBD7FvlT9kPw== + dependencies: + ansi-escapes "^4.3.2" + ansis "^3.3.2" + clean-stack "^3.0.1" + cli-spinners "^2.9.2" + debug "^4.3.7" + ejs "^3.1.10" + get-package-type "^0.1.0" + globby "^11.1.0" + indent-string "^4.0.0" + is-wsl "^2.2.0" + lilconfig "^3.1.2" + minimatch "^9.0.5" + semver "^7.6.3" + string-width "^4.2.3" + supports-color "^8" + widest-line "^3.1.0" + wordwrap "^1.0.0" + wrap-ansi "^7.0.0" + "@oclif/core@^2.15.0": version "2.15.0" resolved "https://registry.yarnpkg.com/@oclif/core/-/core-2.15.0.tgz#f27797b30a77d13279fba88c1698fc34a0bd0d2a" @@ -995,6 +1375,30 @@ wordwrap "^1.0.0" wrap-ansi "^7.0.0" +"@oclif/core@^4": + version "4.2.0" + resolved "https://registry.yarnpkg.com/@oclif/core/-/core-4.2.0.tgz#ab78b247dfd66322d9c9376ebaf3764e3c82fefe" + integrity sha512-ETM2N/GL7W37Kv1Afv1j1Gh77CynS2ubEPP+p+MnjUXEjghNe7+bKAWhPkHnBuFAVFAqdv0qMpUAjxKLbsmbJw== + dependencies: + ansi-escapes "^4.3.2" + ansis "^3.3.2" + clean-stack "^3.0.1" + 
cli-spinners "^2.9.2" + debug "^4.4.0" + ejs "^3.1.10" + get-package-type "^0.1.0" + globby "^11.1.0" + indent-string "^4.0.0" + is-wsl "^2.2.0" + lilconfig "^3.1.3" + minimatch "^9.0.5" + semver "^7.6.3" + string-width "^4.2.3" + supports-color "^8" + widest-line "^3.1.0" + wordwrap "^1.0.0" + wrap-ansi "^7.0.0" + "@oclif/plugin-autocomplete@^2.3.6": version "2.3.10" resolved "https://registry.yarnpkg.com/@oclif/plugin-autocomplete/-/plugin-autocomplete-2.3.10.tgz#787f6208cdfe10ffc68ad89e9e7f1a7ad0e8987f" @@ -1004,6 +1408,16 @@ chalk "^4.1.0" debug "^4.3.4" +"@oclif/plugin-autocomplete@^3.2.11": + version "3.2.16" + resolved "https://registry.yarnpkg.com/@oclif/plugin-autocomplete/-/plugin-autocomplete-3.2.16.tgz#7f3fc40a33db0a8f8258f69f74937f4f4a62f7bf" + integrity sha512-KnfsBGUkwk9LFzeV1M2i4Sd7WPXbx2gnjHy/+h2DPQGXVQ4xg9puSIYCyXyLKXUujb+Ps5+6DNvyNdDJ/8Tytw== + dependencies: + "@oclif/core" "^4" + ansis "^3.5.2" + debug "^4.4.0" + ejs "^3.1.10" + "@oclif/plugin-not-found@^2.4.0": version "2.4.3" resolved "https://registry.yarnpkg.com/@oclif/plugin-not-found/-/plugin-not-found-2.4.3.tgz#3d24095adb0f3876cb4bcfdfdcb775086cf6d4b5" @@ -1013,6 +1427,28 @@ chalk "^4" fast-levenshtein "^3.0.0" +"@oclif/plugin-not-found@^3.2.29": + version "3.2.33" + resolved "https://registry.yarnpkg.com/@oclif/plugin-not-found/-/plugin-not-found-3.2.33.tgz#d74cd2c16eaf0f54aa0f45ae7e29c0664f8bb8ea" + integrity sha512-1RgvZ0J5KloU8TRemHxCr5MbVtr41ungnz8BBCPJn2yR5L+Eo2Lt+kpOyEeYAohjo4Tml1AHSmipUF4jKThwTw== + dependencies: + "@inquirer/prompts" "^7.2.1" + "@oclif/core" "^4" + ansis "^3.5.2" + fast-levenshtein "^3.0.0" + +"@oclif/plugin-warn-if-update-available@^3.1.20", "@oclif/plugin-warn-if-update-available@^3.1.24": + version "3.1.29" + resolved "https://registry.yarnpkg.com/@oclif/plugin-warn-if-update-available/-/plugin-warn-if-update-available-3.1.29.tgz#0e4eabce38b3167cfc56c7b5871024859138cfae" + integrity 
sha512-NmD6hcyBquo9TV26tnYnsbyR69VzaeMC3kqks0YT2947CSJS7smONMxpkjU2P4kLTH4Tn8/n5w/sjoegNe1jdw== + dependencies: + "@oclif/core" "^4" + ansis "^3.5.2" + debug "^4.4.0" + http-call "^5.2.2" + lodash "^4.17.21" + registry-auth-token "^5.0.3" + "@peculiar/asn1-schema@^2.3.8": version "2.3.8" resolved "https://registry.yarnpkg.com/@peculiar/asn1-schema/-/asn1-schema-2.3.8.tgz#04b38832a814e25731232dd5be883460a156da3b" @@ -1040,6 +1476,32 @@ tslib "^2.6.2" webcrypto-core "^1.7.8" +"@pinax/graph-networks-registry@^0.6.5": + version "0.6.7" + resolved "https://registry.yarnpkg.com/@pinax/graph-networks-registry/-/graph-networks-registry-0.6.7.tgz#ceb994f3b31e2943b9c9d9b09dd86eb00d067c0e" + integrity sha512-xogeCEZ50XRMxpBwE3TZjJ8RCO8Guv39gDRrrKtlpDEDEMLm0MzD3A0SQObgj7aF7qTZNRTWzsuvQdxgzw25wQ== + +"@pnpm/config.env-replace@^1.1.0": + version "1.1.0" + resolved "https://registry.yarnpkg.com/@pnpm/config.env-replace/-/config.env-replace-1.1.0.tgz#ab29da53df41e8948a00f2433f085f54de8b3a4c" + integrity sha512-htyl8TWnKL7K/ESFa1oW2UB5lVDxuF5DpM7tBi6Hu2LNL3mWkIzNLG6N4zoCUP1lCKNxWy/3iu8mS8MvToGd6w== + +"@pnpm/network.ca-file@^1.0.1": + version "1.0.2" + resolved "https://registry.yarnpkg.com/@pnpm/network.ca-file/-/network.ca-file-1.0.2.tgz#2ab05e09c1af0cdf2fcf5035bea1484e222f7983" + integrity sha512-YcPQ8a0jwYU9bTdJDpXjMi7Brhkr1mXsXrUJvjqM2mQDgkRiz8jFaQGOdaLxgjtUfQgZhKy/O3cG/YwmgKaxLA== + dependencies: + graceful-fs "4.2.10" + +"@pnpm/npm-conf@^2.1.0": + version "2.3.1" + resolved "https://registry.yarnpkg.com/@pnpm/npm-conf/-/npm-conf-2.3.1.tgz#bb375a571a0bd63ab0a23bece33033c683e9b6b0" + integrity sha512-c83qWb22rNRuB0UaVCI0uRPNRr8Z0FWnEIvT47jiHAmOIUHbBOg5XvV7pM5x+rKn9HRpjxquDbXYSXr3fAKFcw== + dependencies: + "@pnpm/config.env-replace" "^1.1.0" + "@pnpm/network.ca-file" "^1.0.1" + config-chain "^1.1.11" + "@protobufjs/aspromise@^1.1.1", "@protobufjs/aspromise@^1.1.2": version "1.1.2" resolved 
"https://registry.yarnpkg.com/@protobufjs/aspromise/-/aspromise-1.1.2.tgz#9b8b0cc663d669a7d8f6f5d0893a14d348f30fbf" @@ -1638,6 +2100,13 @@ resolved "https://registry.yarnpkg.com/@types/cors/-/cors-2.8.12.tgz#6b2c510a7ad7039e98e7b8d3d6598f4359e5c080" integrity sha512-vt+kDhq/M2ayberEtJcIN/hxXy1Pk+59g2FV/ZQceeaTyCtCucjL2Q7FXlFjtWn4n15KCr1NE2lNNFhp0lEThw== +"@types/dns-packet@^5.6.5": + version "5.6.5" + resolved "https://registry.yarnpkg.com/@types/dns-packet/-/dns-packet-5.6.5.tgz#49fc29a40f5d30227ed028fa1ee82601d3745e15" + integrity sha512-qXOC7XLOEe43ehtWJCMnQXvgcIpv6rPmQ1jXT98Ad8A3TB1Ue50jsCbSSSyuazScEuZ/Q026vHbrOTVkmwA+7Q== + dependencies: + "@types/node" "*" + "@types/express-serve-static-core@4.17.31": version "4.17.31" resolved "https://registry.yarnpkg.com/@types/express-serve-static-core/-/express-serve-static-core-4.17.31.tgz#a1139efeab4e7323834bb0226e62ac019f474b2f" @@ -1803,11 +2272,26 @@ dependencies: "@types/node" "*" +"@whatwg-node/disposablestack@^0.0.5": + version "0.0.5" + resolved "https://registry.yarnpkg.com/@whatwg-node/disposablestack/-/disposablestack-0.0.5.tgz#cd646b1ef60a36972e018ab21f412a3539c6deec" + integrity sha512-9lXugdknoIequO4OYvIjhygvfSEgnO8oASLqLelnDhkRjgBZhc39shC3QSlZuyDO9bgYSIVa2cHAiN+St3ty4w== + dependencies: + tslib "^2.6.3" + "@whatwg-node/events@^0.0.3": version "0.0.3" resolved "https://registry.yarnpkg.com/@whatwg-node/events/-/events-0.0.3.tgz#13a65dd4f5893f55280f766e29ae48074927acad" integrity sha512-IqnKIDWfXBJkvy/k6tzskWTc2NK3LcqHlb+KHGCrjOCH4jfQckRX0NAiIcC/vIqQkzLYw2r2CTSwAxcrtcD6lA== +"@whatwg-node/fetch@^0.10.1": + version "0.10.1" + resolved "https://registry.yarnpkg.com/@whatwg-node/fetch/-/fetch-0.10.1.tgz#ca08b2b9928a465f6e562d6cc460840340c15d14" + integrity sha512-gmPOLrsjSZWEZlr9Oe5+wWFBq3CG6fN13rGlM91Jsj/vZ95G9CCvrORGBAxMXy0AJGiC83aYiHXn3JzTzXQmbA== + dependencies: + "@whatwg-node/node-fetch" "^0.7.1" + urlpattern-polyfill "^10.0.0" + "@whatwg-node/fetch@^0.8.4": version "0.8.8" resolved 
"https://registry.yarnpkg.com/@whatwg-node/fetch/-/fetch-0.8.8.tgz#48c6ad0c6b7951a73e812f09dd22d75e9fa18cae" @@ -1830,6 +2314,17 @@ fast-url-parser "^1.1.3" tslib "^2.3.1" +"@whatwg-node/node-fetch@^0.7.1": + version "0.7.5" + resolved "https://registry.yarnpkg.com/@whatwg-node/node-fetch/-/node-fetch-0.7.5.tgz#b81e9d5f4b9032e480032c73e7bac284c4e3bdb8" + integrity sha512-t7kGrt2fdfNvzy1LCAE9/OnIyMtizgFhgJmk7iLJwQsLmR7S86F8Q4aDRPbCfo7pISJP6Fx/tPdfFNjHS23WTA== + dependencies: + "@kamilkisiela/fast-url-parser" "^1.1.4" + "@whatwg-node/disposablestack" "^0.0.5" + busboy "^1.6.0" + fast-querystring "^1.1.1" + tslib "^2.6.3" + JSONStream@1.3.2: version "1.3.2" resolved "https://registry.yarnpkg.com/JSONStream/-/JSONStream-1.3.2.tgz#c102371b6ec3a7cf3b847ca00c20bb0fce4c6dea" @@ -1846,6 +2341,11 @@ JSONStream@^1.3.5: jsonparse "^1.2.0" through ">=2.2.7 <3" +abitype@0.7.1: + version "0.7.1" + resolved "https://registry.yarnpkg.com/abitype/-/abitype-0.7.1.tgz#16db20abe67de80f6183cf75f3de1ff86453b745" + integrity sha512-VBkRHTDZf9Myaek/dO3yMmOzB/y2s3Zo6nVU7yaw1G+TvCHAjwaJzNGN9yo4K5D8bU/VZXKP1EJpRhFr862PlQ== + abort-controller@3.0.0, abort-controller@^3.0.0: version "3.0.0" resolved "https://registry.yarnpkg.com/abort-controller/-/abort-controller-3.0.0.tgz#eaf54d53b62bae4138e809ca225c8439a6efb392" @@ -2016,6 +2516,11 @@ ansi-regex@^5.0.1: resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-5.0.1.tgz#082cb2c89c9fe8659a311a53bd6a4dc5301db304" integrity sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ== +ansi-regex@^6.0.1: + version "6.1.0" + resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-6.1.0.tgz#95ec409c69619d6cb1b8b34f14b660ef28ebd654" + integrity sha512-7HSX4QQb4CspciLpVFwyRe79O3xsIZDDLER21kERQ71oaPodF8jL725AgJMFAYbooIqolJoRLuM81SpeUkpkvA== + ansi-styles@^2.2.1: version "2.2.1" resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-2.2.1.tgz#b432dd3358b634cf75e1e4664368240533c1ddbe" @@ -2035,11 
+2540,21 @@ ansi-styles@^4.0.0, ansi-styles@^4.1.0, ansi-styles@^4.3.0: dependencies: color-convert "^2.0.1" +ansi-styles@^6.1.0: + version "6.2.1" + resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-6.2.1.tgz#0e62320cf99c21afff3b3012192546aacbfb05c5" + integrity sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug== + ansicolors@~0.3.2: version "0.3.2" resolved "https://registry.yarnpkg.com/ansicolors/-/ansicolors-0.3.2.tgz#665597de86a9ffe3aa9bfbe6cae5c6ea426b4979" integrity sha512-QXu7BPrP29VllRxH8GwB7x5iX5qWKAAMLqKQGWTeLWVlNHNOpVMJ91dsxQAIWXpjuW5wqvxu3Jd/nRjrJ+0pqg== +ansis@^3.3.2, ansis@^3.5.2: + version "3.6.0" + resolved "https://registry.yarnpkg.com/ansis/-/ansis-3.6.0.tgz#f4d8437fb27659bf5a6adca90135801919dee764" + integrity sha512-8KluYVZM+vx19f5rInhdEBdIAjvBp7ASzyF/DoStcDpMJ3JOM55ybvUcs9nMRVP8XN2K3ABBdO7zCXezvrT0pg== + any-signal@^2.1.2: version "2.1.2" resolved "https://registry.yarnpkg.com/any-signal/-/any-signal-2.1.2.tgz#8d48270de0605f8b218cf9abe8e9c6a0e7418102" @@ -2053,6 +2568,11 @@ any-signal@^3.0.0: resolved "https://registry.yarnpkg.com/any-signal/-/any-signal-3.0.1.tgz#49cae34368187a3472e31de28fb5cb1430caa9a6" integrity sha512-xgZgJtKEa9YmDqXodIgl7Fl1C8yNXr8w6gXjqK3LW4GcEiYT+6AQfJSE/8SPsEpLLmcvbv8YU+qet94UewHxqg== +any-signal@^4.1.1: + version "4.1.1" + resolved "https://registry.yarnpkg.com/any-signal/-/any-signal-4.1.1.tgz#928416c355c66899e6b2a91cad4488f0324bae03" + integrity sha512-iADenERppdC+A2YKbOXXB2WUeABLaM6qnpZ70kZbPZ1cZMMJ7eF+3CaYm+/PhBizgkzlvssC7QuHS30oOiQYWA== + anymatch@~3.1.2: version "3.1.3" resolved "https://registry.yarnpkg.com/anymatch/-/anymatch-3.1.3.tgz#790c58b19ba1720a84205b57c618d5ad8524973e" @@ -2349,6 +2869,13 @@ axios@^0.21.1, axios@^0.21.4: dependencies: follow-redirects "^1.14.0" +axios@^0.26.1: + version "0.26.1" + resolved "https://registry.yarnpkg.com/axios/-/axios-0.26.1.tgz#1ede41c51fcf51bbbd6fd43669caaa4f0495aaa9" + integrity 
sha512-fPwcX4EvnSHuInCMItEhAGnaSEXRBjtzh9fOtsE6E1G6p7vl7edEeZe11QHf18+6+9gR5PbKV/sGKNaD8YaMeA== + dependencies: + follow-redirects "^1.14.8" + babel-code-frame@^6.26.0: version "6.26.0" resolved "https://registry.yarnpkg.com/babel-code-frame/-/babel-code-frame-6.26.0.tgz#63fd43f7dc1e3bb7ce35947db8fe369a3f58c74b" @@ -2577,6 +3104,15 @@ binary-install-raw@0.0.13: rimraf "^3.0.2" tar "^6.1.0" +binary-install@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/binary-install/-/binary-install-1.1.0.tgz#61195349acabf5a043f3805b03f96e506cc96d6e" + integrity sha512-rkwNGW+3aQVSZoD0/o3mfPN6Yxh3Id0R/xzTVBVVpGNlVz8EGwusksxRlbk/A5iKTZt9zkMn3qIqmAt3vpfbzg== + dependencies: + axios "^0.26.1" + rimraf "^3.0.2" + tar "^6.1.11" + binaryen@101.0.0-nightly.20210723: version "101.0.0-nightly.20210723" resolved "https://registry.yarnpkg.com/binaryen/-/binaryen-101.0.0-nightly.20210723.tgz#b6bb7f3501341727681a03866c0856500eec3740" @@ -2607,6 +3143,13 @@ blob-to-it@^1.0.1: dependencies: browser-readablestream-to-it "^1.0.3" +blob-to-it@^2.0.5: + version "2.0.7" + resolved "https://registry.yarnpkg.com/blob-to-it/-/blob-to-it-2.0.7.tgz#637b8bb14963a7fce658ee758d9251dd1ee9fd3c" + integrity sha512-mFAR/GKDDqFOkSBB7shXfsUZwU5DgK453++I8/SImNacfJsdKlx/oHTO0T4ZYHz8A2dnSONE+CX8L29VlWGKiQ== + dependencies: + browser-readablestream-to-it "^2.0.0" + bluebird@^3.5.0, bluebird@^3.5.2: version "3.7.2" resolved "https://registry.yarnpkg.com/bluebird/-/bluebird-3.7.2.tgz#9f229c15be272454ffa973ace0dbee79a1b0c36f" @@ -2672,6 +3215,13 @@ braces@^3.0.2, braces@~3.0.2: dependencies: fill-range "^7.0.1" +braces@^3.0.3: + version "3.0.3" + resolved "https://registry.yarnpkg.com/braces/-/braces-3.0.3.tgz#490332f40919452272d55a8480adc0c441358789" + integrity sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA== + dependencies: + fill-range "^7.1.1" + brorand@^1.1.0: version "1.1.0" resolved 
"https://registry.yarnpkg.com/brorand/-/brorand-1.1.0.tgz#12c25efe40a45e3c323eb8675a0a0ce57b22371f" @@ -2682,6 +3232,11 @@ browser-readablestream-to-it@^1.0.0, browser-readablestream-to-it@^1.0.1, browse resolved "https://registry.yarnpkg.com/browser-readablestream-to-it/-/browser-readablestream-to-it-1.0.3.tgz#ac3e406c7ee6cdf0a502dd55db33bab97f7fba76" integrity sha512-+12sHB+Br8HIh6VAMVEG5r3UXCyESIgDW7kzk3BjIXa43DVqVwL7GC5TW3jeh+72dtcH99pPVpw0X8i0jt+/kw== +browser-readablestream-to-it@^2.0.0, browser-readablestream-to-it@^2.0.5: + version "2.0.7" + resolved "https://registry.yarnpkg.com/browser-readablestream-to-it/-/browser-readablestream-to-it-2.0.7.tgz#ddcc4b34a4b08ef415f89eb215297acea3e05fd0" + integrity sha512-g1Aznml3HmqTLSXylZhGwdfnAa67+vlNAYhT9ROJZkAxY7yYmWusND10olvCMPe4sVhZyVwn5tPkRzOg85kBEg== + browser-stdout@1.3.1: version "1.3.1" resolved "https://registry.yarnpkg.com/browser-stdout/-/browser-stdout-1.3.1.tgz#baa559ee14ced73452229bad7326467c61fabd60" @@ -2800,6 +3355,13 @@ bufferutil@^4.0.1: dependencies: node-gyp-build "^4.3.0" +bundle-name@^4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/bundle-name/-/bundle-name-4.1.0.tgz#f3b96b34160d6431a19d7688135af7cfb8797889" + integrity sha512-tjwM5exMg6BGRI+kNmTntNsvdZS1X8BFYS6tnJ2hdH0kVxM6/eVZ2xy+FqStSWvYmtfFMDLIxurorHwDKfDz5Q== + dependencies: + run-applescript "^7.0.0" + busboy@^1.6.0: version "1.6.0" resolved "https://registry.yarnpkg.com/busboy/-/busboy-1.6.0.tgz#966ea36a9502e43cdb9146962523b92f531f6893" @@ -2915,6 +3477,11 @@ cborg@^1.5.4, cborg@^1.6.0: resolved "https://registry.yarnpkg.com/cborg/-/cborg-1.10.2.tgz#83cd581b55b3574c816f82696307c7512db759a1" integrity sha512-b3tFPA9pUr2zCUiCfRd2+wok2/LBSNUMKOuRRok+WlvvAgEt/PlbgPTsZUcwCOs53IJvLgTp0eotwtosE6njug== +cborg@^4.0.0: + version "4.2.7" + resolved "https://registry.yarnpkg.com/cborg/-/cborg-4.2.7.tgz#19769ecaf201461eeef69ca215cf3cbda0a695bd" + integrity 
sha512-zHTUAm+HAoRLtGEQ1b28HXBm8d/5YP+7eiSKzEu/mpFkptGYaMQCHv15OiQBuyNlIgbCBXvBbZQPl3xvcZTJXg== + chalk@3.0.0, chalk@^3.0.0: version "3.0.0" resolved "https://registry.yarnpkg.com/chalk/-/chalk-3.0.0.tgz#3f73c2bf526591f574cc492c51e2456349f844e4" @@ -2975,6 +3542,11 @@ change-case@3.0.2: upper-case "^1.1.1" upper-case-first "^1.1.0" +chardet@^0.7.0: + version "0.7.0" + resolved "https://registry.yarnpkg.com/chardet/-/chardet-0.7.0.tgz#90094849f0937f2eedc2425d0d28a9e5f0cbad9e" + integrity sha512-mT8iDcrh03qDGRRmoA2hmBJnxpllMR+0/0qlzjqZES6NdiWDcZkCNAk4rPFZ9Q85r27unkiNNg8ZOiwZXBHwcA== + checkpoint-store@^1.1.0: version "1.1.0" resolved "https://registry.yarnpkg.com/checkpoint-store/-/checkpoint-store-1.1.0.tgz#04e4cb516b91433893581e6d4601a78e9552ea06" @@ -3022,6 +3594,13 @@ chokidar@3.5.3: optionalDependencies: fsevents "~2.3.2" +chokidar@4.0.1: + version "4.0.1" + resolved "https://registry.yarnpkg.com/chokidar/-/chokidar-4.0.1.tgz#4a6dff66798fb0f72a94f616abbd7e1a19f31d41" + integrity sha512-n8enUVCED/KVRQlab1hr3MVpcVMvxtZjmEa956u+4YijlmQED223XMSYj2tLuKvr4jcCTzNNMpQDUer72MMmzA== + dependencies: + readdirp "^4.0.1" + chownr@^1.0.1, chownr@^1.1.4: version "1.1.4" resolved "https://registry.yarnpkg.com/chownr/-/chownr-1.1.4.tgz#6fc9d7b42d32a583596337666e7d08084da2cc6b" @@ -3077,7 +3656,7 @@ cli-progress@^3.12.0: dependencies: string-width "^4.2.3" -cli-spinners@^2.2.0: +cli-spinners@^2.2.0, cli-spinners@^2.9.2: version "2.9.2" resolved "https://registry.yarnpkg.com/cli-spinners/-/cli-spinners-2.9.2.tgz#1773a8f4b9c4d6ac31563df53b3fc1d79462fe41" integrity sha512-ywqV+5MmyL4E7ybXgKys4DugZbX0FC6LnwrhjuykIjnK9k8OQacQ7axGKnjDXWNhns0xot3bZI5h55H8yo9cJg== @@ -3102,6 +3681,11 @@ cli-table3@~0.5.0: optionalDependencies: colors "^1.1.2" +cli-width@^4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/cli-width/-/cli-width-4.1.0.tgz#42daac41d3c254ef38ad8ac037672130173691c5" + integrity 
sha512-ouuZd4/dm2Sw5Gmqy6bGyNNNe1qt9RpmxveLSO7KcgsTnU7RXfsw+/bukWGo1abgBiMAic068rclZsO4IWmmxQ== + cliui@^3.2.0: version "3.2.0" resolved "https://registry.yarnpkg.com/cliui/-/cliui-3.2.0.tgz#120601537a916d29940f934da3b48d585a39213d" @@ -3229,6 +3813,14 @@ conf@^10.1.2: pkg-up "^3.1.0" semver "^7.3.5" +config-chain@^1.1.11: + version "1.1.13" + resolved "https://registry.yarnpkg.com/config-chain/-/config-chain-1.1.13.tgz#fad0795aa6a6cdaff9ed1b68e9dff94372c232f4" + integrity sha512-qj+f8APARXHrM0hraqXYb2/bOVSV4PvJQlNZ/DVj0QrmNM2q2euizkeuVckQ57J+W0mRH6Hvi+k50M4Jul2VRQ== + dependencies: + ini "^1.3.4" + proto-list "~1.2.1" + constant-case@^2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/constant-case/-/constant-case-2.0.0.tgz#4175764d389d3fa9c8ecd29186ed6005243b6a46" @@ -3253,7 +3845,7 @@ content-hash@^2.5.2: multicodec "^0.5.5" multihashes "^0.4.15" -content-type@~1.0.4, content-type@~1.0.5: +content-type@^1.0.4, content-type@~1.0.4, content-type@~1.0.5: version "1.0.5" resolved "https://registry.yarnpkg.com/content-type/-/content-type-1.0.5.tgz#8b773162656d1d1086784c8f23a54ce6d73d7918" integrity sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA== @@ -3432,6 +4024,14 @@ d@1, d@^1.0.1: es5-ext "^0.10.50" type "^1.0.1" +dag-jose@^5.0.0: + version "5.1.1" + resolved "https://registry.yarnpkg.com/dag-jose/-/dag-jose-5.1.1.tgz#02708321f14b6f43990e238010c73464916259a7" + integrity sha512-9alfZ8Wh1XOOMel8bMpDqWsDT72ojFQCJPtwZSev9qh4f8GoCV9qrJW8jcOUhcstO8Kfm09FHGo//jqiZq3z9w== + dependencies: + "@ipld/dag-cbor" "^9.0.0" + multiformats "~13.1.3" + dashdash@^1.12.0: version "1.14.1" resolved "https://registry.yarnpkg.com/dashdash/-/dashdash-1.14.1.tgz#853cfa0f7cbe2fed5de20326b8dd581035f6e2f0" @@ -3472,6 +4072,13 @@ debug@4.3.4, debug@^4.1.1, debug@^4.3.1, debug@^4.3.4: dependencies: ms "2.1.2" +debug@4.3.7: + version "4.3.7" + resolved 
"https://registry.yarnpkg.com/debug/-/debug-4.3.7.tgz#87945b4151a011d76d95a198d7111c865c360a52" + integrity sha512-Er2nc/H7RrMXZBFCEim6TCmMk02Z8vLC2Rbi1KEBggpo0fS6l0S1nnapwmIi3yW/+GOJap1Krg4w0Hg80oCqgQ== + dependencies: + ms "^2.1.3" + debug@^3.1.0, debug@^3.2.6: version "3.2.7" resolved "https://registry.yarnpkg.com/debug/-/debug-3.2.7.tgz#72580b7e9145fb39b6676f9c5e5fb100b934179a" @@ -3479,6 +4086,13 @@ debug@^3.1.0, debug@^3.2.6: dependencies: ms "^2.1.1" +debug@^4.3.7, debug@^4.4.0: + version "4.4.0" + resolved "https://registry.yarnpkg.com/debug/-/debug-4.4.0.tgz#2b3f2aea2ffeb776477460267377dc8710faba8a" + integrity sha512-6WTZ/IxCY/T6BALoZHaE4ctp9xm+Z5kY/pzYaCHRFeyVhojxlrm+46y68HA6hr0TcwEssoxNiDEUJQjfPZ/RYA== + dependencies: + ms "^2.1.3" + decamelize@^1.1.1, decamelize@^1.2.0: version "1.2.0" resolved "https://registry.yarnpkg.com/decamelize/-/decamelize-1.2.0.tgz#f6534d15148269b20352e7bee26f501f9a191290" @@ -3508,6 +4122,19 @@ decompress-response@^6.0.0: dependencies: mimic-response "^3.1.0" +default-browser-id@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/default-browser-id/-/default-browser-id-5.0.0.tgz#a1d98bf960c15082d8a3fa69e83150ccccc3af26" + integrity sha512-A6p/pu/6fyBcA1TRz/GqWYPViplrftcW2gZC9q79ngNCKAeR/X3gcEdXQHl4KNXV+3wgIJ1CPkJQ3IHM6lcsyA== + +default-browser@^5.2.1: + version "5.2.1" + resolved "https://registry.yarnpkg.com/default-browser/-/default-browser-5.2.1.tgz#7b7ba61204ff3e425b556869ae6d3e9d9f1712cf" + integrity sha512-WY/3TUME0x3KPYdRRxEJJvXRHV4PyPoUsxtZa78lwItwRQRHhd2U9xOscaT/YTf8uCXIAjeJOFBVEh/7FtD8Xg== + dependencies: + bundle-name "^4.1.0" + default-browser-id "^5.0.0" + defaults@^1.0.3: version "1.0.4" resolved "https://registry.yarnpkg.com/defaults/-/defaults-1.0.4.tgz#b0b02062c1e2aa62ff5d9528f0f98baa90978d7a" @@ -3544,6 +4171,16 @@ define-data-property@^1.1.2: es-errors "^1.3.0" gopd "^1.0.1" +define-lazy-prop@^2.0.0: + version "2.0.0" + resolved 
"https://registry.yarnpkg.com/define-lazy-prop/-/define-lazy-prop-2.0.0.tgz#3f7ae421129bcaaac9bc74905c98a0009ec9ee7f" + integrity sha512-Ds09qNh8yw3khSjiJjiUInaGX9xlqZDY7JVryGxdxV7NPeuqQfplOpQ66yJFZut3jLa5zOwkXw1g9EI2uKh4Og== + +define-lazy-prop@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/define-lazy-prop/-/define-lazy-prop-3.0.0.tgz#dbb19adfb746d7fc6d734a06b72f4a00d021255f" + integrity sha512-N+MeXYoqr3pOgn8xfyRPREN7gHakLYjhsHhWGT3fWAiL4IkAt0iDw14QiiEm2bE30c5XX5q0FtAA3CK5f9/BUg== + delay@^5.0.0: version "5.0.0" resolved "https://registry.yarnpkg.com/delay/-/delay-5.0.0.tgz#137045ef1b96e5071060dd5be60bf9334436bd1d" @@ -3602,6 +4239,13 @@ dns-over-http-resolver@^1.2.3: native-fetch "^3.0.0" receptacle "^1.3.2" +dns-packet@^5.6.1: + version "5.6.1" + resolved "https://registry.yarnpkg.com/dns-packet/-/dns-packet-5.6.1.tgz#ae888ad425a9d1478a0674256ab866de1012cf2f" + integrity sha512-l4gcSouhcgIKRvyy99RNVOgxXiicE+2jZoNmaNmZ6JXiGajBOJAesk1OBlJuM5k2c+eudGdLxDqXuPCKIj6kpw== + dependencies: + "@leichtgewicht/ip-codec" "^2.0.1" + docker-compose@0.23.19: version "0.23.19" resolved "https://registry.yarnpkg.com/docker-compose/-/docker-compose-0.23.19.tgz#9947726e2fe67bdfa9e8efe1ff15aa0de2e10eb8" @@ -3609,6 +4253,13 @@ docker-compose@0.23.19: dependencies: yaml "^1.10.2" +docker-compose@1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/docker-compose/-/docker-compose-1.1.0.tgz#ccc21b280430357e51e192e29dc7b4cef81f3784" + integrity sha512-VrkQJNafPQ5d6bGULW0P6KqcxSkv3ZU5Wn2wQA19oB71o7+55vQ9ogFe2MMeNbK+jc9rrKVy280DnHO5JLMWOQ== + dependencies: + yaml "^2.2.2" + docker-modem@^1.0.8: version "1.0.9" resolved "https://registry.yarnpkg.com/docker-modem/-/docker-modem-1.0.9.tgz#a1f13e50e6afb6cf3431b2d5e7aac589db6aaba8" @@ -3682,6 +4333,11 @@ double-ended-queue@2.1.0-0: resolved "https://registry.yarnpkg.com/double-ended-queue/-/double-ended-queue-2.1.0-0.tgz#103d3527fd31528f40188130c841efdd78264e5c" integrity 
sha512-+BNfZ+deCo8hMNpDqDnvT+c0XpJ5cUa6mqYq89bho2Ifze4URTqRkcwR399hWoTrTkbZ/XJYDgP6rc7pRgffEQ== +eastasianwidth@^0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/eastasianwidth/-/eastasianwidth-0.2.0.tgz#696ce2ec0aa0e6ea93a397ffcf24aa7840c827cb" + integrity sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA== + ecc-jsbn@~0.1.1: version "0.1.2" resolved "https://registry.yarnpkg.com/ecc-jsbn/-/ecc-jsbn-0.1.2.tgz#3a83a904e54353287874c564b7549386849a98c9" @@ -3707,6 +4363,13 @@ ejs@^2.6.1: resolved "https://registry.yarnpkg.com/ejs/-/ejs-2.7.4.tgz#48661287573dcc53e366c7a1ae52c3a120eec9ba" integrity sha512-7vmuyh5+kuUyJKePhQfRQBhXV5Ce+RnaeeQArKu1EAMpL3WbgMt5WG6uQZpEVvYSSsxMXRKOewtDk9RaTKXRlA== +ejs@^3.1.10: + version "3.1.10" + resolved "https://registry.yarnpkg.com/ejs/-/ejs-3.1.10.tgz#69ab8358b14e896f80cc39e62087b88500c3ac3b" + integrity sha512-UeJmFfOrAQS8OJWPZ4qtgHyWExa088/MtK5UEyoJGFH67cDEXkZSviOiKRCZ4Xij0zxI3JECgYs3oKx+AizQBA== + dependencies: + jake "^10.8.5" + ejs@^3.1.8: version "3.1.9" resolved "https://registry.yarnpkg.com/ejs/-/ejs-3.1.9.tgz#03c9e8777fe12686a9effcef22303ca3d8eeb361" @@ -3714,7 +4377,7 @@ ejs@^3.1.8: dependencies: jake "^10.8.5" -electron-fetch@^1.7.2: +electron-fetch@^1.7.2, electron-fetch@^1.9.1: version "1.9.1" resolved "https://registry.yarnpkg.com/electron-fetch/-/electron-fetch-1.9.1.tgz#e28bfe78d467de3f2dec884b1d72b8b05322f30f" integrity sha512-M9qw6oUILGVrcENMSRRefE1MbHPIz0h79EKIeJWK9v563aT9Qkh8aEHPO1H5vi970wPirNY+jO9OpFoLiMsMGA== @@ -3754,6 +4417,11 @@ emoji-regex@^8.0.0: resolved "https://registry.yarnpkg.com/emoji-regex/-/emoji-regex-8.0.0.tgz#e818fd69ce5ccfcb404594f842963bf53164cc37" integrity sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A== +emoji-regex@^9.2.2: + version "9.2.2" + resolved "https://registry.yarnpkg.com/emoji-regex/-/emoji-regex-9.2.2.tgz#840c8803b0d8047f4ff0cf963176b32d4ef3ed72" + integrity 
sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg== + encodeurl@~1.0.2: version "1.0.2" resolved "https://registry.yarnpkg.com/encodeurl/-/encodeurl-1.0.2.tgz#ad3ff4c86ec2d029322f5a02c3a9a606c95b3f59" @@ -4275,6 +4943,11 @@ eventemitter3@4.0.4: resolved "https://registry.yarnpkg.com/eventemitter3/-/eventemitter3-4.0.4.tgz#b5463ace635a083d018bdc7c917b4c5f10a85384" integrity sha512-rlaVLnVxtxvoyLsQQFBx53YmXHDxRIzzTLbdfxqi4yocpSjAxXwkU0cScM5JgSKMqEhrZpnvQ2D9gjylR0AimQ== +eventemitter3@^5.0.1: + version "5.0.1" + resolved "https://registry.yarnpkg.com/eventemitter3/-/eventemitter3-5.0.1.tgz#53f5ffd0a492ac800721bb42c66b841de96423c4" + integrity sha512-GWkBvjiSZK87ELrYOSESUYeVIc9mvLLf/nXalMOS5dYrgZq9o5OVkbZAVM06CVxYsCwH9BDZFPlQTlPA1j4ahA== + events@^3.0.0: version "3.3.0" resolved "https://registry.yarnpkg.com/events/-/events-3.3.0.tgz#31a95ad0a924e2d2c419a813aeb2c4e878ea7400" @@ -4368,6 +5041,15 @@ extend@~3.0.2: resolved "https://registry.yarnpkg.com/extend/-/extend-3.0.2.tgz#f8b1136b4071fbd8eb140aff858b1019ec2915fa" integrity sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g== +external-editor@^3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/external-editor/-/external-editor-3.1.0.tgz#cb03f740befae03ea4d283caed2741a83f335495" + integrity sha512-hMQ4CX1p1izmuLYyZqLMO/qGNw10wSv9QDCPfzXfyFrOaCSSoRfqE1Kf1s5an66J5JZC62NewG+mK49jOCtQew== + dependencies: + chardet "^0.7.0" + iconv-lite "^0.4.24" + tmp "^0.0.33" + extsprintf@1.3.0: version "1.3.0" resolved "https://registry.yarnpkg.com/extsprintf/-/extsprintf-1.3.0.tgz#96918440e3041a7a414f8c52e3c574eb3c3e1e05" @@ -4423,6 +5105,17 @@ fast-glob@^3.2.9: merge2 "^1.3.0" micromatch "^4.0.4" +fast-glob@^3.3.2: + version "3.3.3" + resolved "https://registry.yarnpkg.com/fast-glob/-/fast-glob-3.3.3.tgz#d06d585ce8dba90a16b0505c543c3ccfb3aeb818" + integrity 
sha512-7MptL8U0cqcFdzIzwOTHoilX9x5BrNqye7Z/LuC7kCMRio1EMSyqRK3BEAUD7sXRq4iT4AzTVuZdhgQ2TCvYLg== + dependencies: + "@nodelib/fs.stat" "^2.0.2" + "@nodelib/fs.walk" "^1.2.3" + glob-parent "^5.1.2" + merge2 "^1.3.0" + micromatch "^4.0.8" + fast-json-stable-stringify@^2.0.0, fast-json-stable-stringify@^2.1.0: version "2.1.0" resolved "https://registry.yarnpkg.com/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz#874bf69c6f404c2b5d99c481341399fd55892633" @@ -4494,6 +5187,13 @@ fill-range@^7.0.1: dependencies: to-regex-range "^5.0.1" +fill-range@^7.1.1: + version "7.1.1" + resolved "https://registry.yarnpkg.com/fill-range/-/fill-range-7.1.1.tgz#44265d3cac07e3ea7dc247516380643754a05292" + integrity sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg== + dependencies: + to-regex-range "^5.0.1" + finalhandler@1.2.0: version "1.2.0" resolved "https://registry.yarnpkg.com/finalhandler/-/finalhandler-1.2.0.tgz#7d23fe5731b207b4640e4fcd00aec1f9207a7b32" @@ -4547,6 +5247,11 @@ follow-redirects@^1.12.1, follow-redirects@^1.14.0, follow-redirects@^1.15.0: resolved "https://registry.yarnpkg.com/follow-redirects/-/follow-redirects-1.15.5.tgz#54d4d6d062c0fa7d9d17feb008461550e3ba8020" integrity sha512-vSFWUON1B+yAw1VN4xMfxgn5fTUiaOzAJCKBwIIgT/+7CuGy9+r+5gITvP62j3RmaD5Ph65UaERdOSRGUzZtgw== +follow-redirects@^1.14.8: + version "1.15.9" + resolved "https://registry.yarnpkg.com/follow-redirects/-/follow-redirects-1.15.9.tgz#a604fa10e443bf98ca94228d9eebcc2e8a2c8ee1" + integrity sha512-gew4GsXizNgdoRyqmyfMHyAmXsZDk6mHkSxZFCzW9gwlbtOW44CDtYavM+y+72qD/Vq2l550kMF52DT8fOLJqQ== + for-each@^0.3.3: version "0.3.3" resolved "https://registry.yarnpkg.com/for-each/-/for-each-0.3.3.tgz#69b447e88a0a5d32c3e7084f3f1710034b21376e" @@ -4559,6 +5264,14 @@ foreach@^2.0.4: resolved "https://registry.yarnpkg.com/foreach/-/foreach-2.0.6.tgz#87bcc8a1a0e74000ff2bf9802110708cfb02eb6e" integrity 
sha512-k6GAGDyqLe9JaebCsFCoudPPWfihKu8pylYXRlqP1J7ms39iPoTtk2fviNglIeQEwdh0bQeKJ01ZPyuyQvKzwg== +foreground-child@^3.1.0: + version "3.3.0" + resolved "https://registry.yarnpkg.com/foreground-child/-/foreground-child-3.3.0.tgz#0ac8644c06e431439f8561db8ecf29a7b5519c77" + integrity sha512-Ld2g8rrAyMYFXBhEqMz8ZAHBi4J4uS1i/CxGMDnjyFWddMXLVcDp051DZfu+t7+ab7Wv6SMqpWmyFIj5UbfFvg== + dependencies: + cross-spawn "^7.0.0" + signal-exit "^4.0.1" + forever-agent@~0.6.1: version "0.6.1" resolved "https://registry.yarnpkg.com/forever-agent/-/forever-agent-0.6.1.tgz#fbc71f0c41adeb37f96c577ad1ed42d8fdacca91" @@ -4611,6 +5324,15 @@ fs-constants@^1.0.0: resolved "https://registry.yarnpkg.com/fs-constants/-/fs-constants-1.0.0.tgz#6be0de9be998ce16af8afc24497b9ee9b7ccd9ad" integrity sha512-y6OAwoSIf7FyjMIv94u+b5rdheZEjzR63GTyZJm5qh4Bi+2YgwLCcI/fPFZkL5PSixOt6ZNKm+w+Hfp/Bciwow== +fs-extra@11.2.0: + version "11.2.0" + resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-11.2.0.tgz#e70e17dfad64232287d01929399e0ea7c86b0e5b" + integrity sha512-PmDi3uwK5nFuXh7XDTlVnS17xJS7vW36is2+w3xcv8SVxiB4NyATf4ctkVY5bkSjX0Y4nbvZCq1/EjtEyr9ktw== + dependencies: + graceful-fs "^4.2.0" + jsonfile "^6.0.1" + universalify "^2.0.0" + fs-extra@9.1.0, fs-extra@^9.1.0: version "9.1.0" resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-9.1.0.tgz#5954460c764a8da2094ba3554bf839e6b9a7c86d" @@ -4774,6 +5496,18 @@ glob-parent@^5.1.2, glob-parent@~5.1.2: dependencies: is-glob "^4.0.1" +glob@11.0.0: + version "11.0.0" + resolved "https://registry.yarnpkg.com/glob/-/glob-11.0.0.tgz#6031df0d7b65eaa1ccb9b29b5ced16cea658e77e" + integrity sha512-9UiX/Bl6J2yaBbxKoEBRm4Cipxgok8kQYcOPEhScPwebu2I0HoQOuYdIO6S3hLuWoZgpDpwQZMzTFxgpkyT76g== + dependencies: + foreground-child "^3.1.0" + jackspeak "^4.0.1" + minimatch "^10.0.0" + minipass "^7.1.2" + package-json-from-dist "^1.0.0" + path-scurry "^2.0.0" + glob@7.2.0: version "7.2.0" resolved 
"https://registry.yarnpkg.com/glob/-/glob-7.2.0.tgz#d15535af7732e02e948f4c41628bd910293f6023" @@ -4833,10 +5567,46 @@ globby@^11.1.0: merge2 "^1.4.1" slash "^3.0.0" -gluegun@5.1.6: - version "5.1.6" - resolved "https://registry.yarnpkg.com/gluegun/-/gluegun-5.1.6.tgz#74ec13193913dc610f5c1a4039972c70c96a7bad" - integrity sha512-9zbi4EQWIVvSOftJWquWzr9gLX2kaDgPkNR5dYWbM53eVvCI3iKuxLlnKoHC0v4uPoq+Kr/+F569tjoFbA4DSA== +gluegun@5.1.6: + version "5.1.6" + resolved "https://registry.yarnpkg.com/gluegun/-/gluegun-5.1.6.tgz#74ec13193913dc610f5c1a4039972c70c96a7bad" + integrity sha512-9zbi4EQWIVvSOftJWquWzr9gLX2kaDgPkNR5dYWbM53eVvCI3iKuxLlnKoHC0v4uPoq+Kr/+F569tjoFbA4DSA== + dependencies: + apisauce "^2.1.5" + app-module-path "^2.2.0" + cli-table3 "0.6.0" + colors "1.4.0" + cosmiconfig "7.0.1" + cross-spawn "7.0.3" + ejs "3.1.8" + enquirer "2.3.6" + execa "5.1.1" + fs-jetpack "4.3.1" + lodash.camelcase "^4.3.0" + lodash.kebabcase "^4.1.1" + lodash.lowercase "^4.3.0" + lodash.lowerfirst "^4.3.1" + lodash.pad "^4.5.1" + lodash.padend "^4.6.1" + lodash.padstart "^4.6.1" + lodash.repeat "^4.1.0" + lodash.snakecase "^4.1.1" + lodash.startcase "^4.4.0" + lodash.trim "^4.5.1" + lodash.trimend "^4.5.1" + lodash.trimstart "^4.5.1" + lodash.uppercase "^4.3.0" + lodash.upperfirst "^4.3.1" + ora "4.0.2" + pluralize "^8.0.0" + semver "7.3.5" + which "2.0.2" + yargs-parser "^21.0.0" + +gluegun@5.2.0: + version "5.2.0" + resolved "https://registry.yarnpkg.com/gluegun/-/gluegun-5.2.0.tgz#88ba1f76f20e68a135557a4a4c8ea283291a7491" + integrity sha512-jSUM5xUy2ztYFQANne17OUm/oAd7qSX7EBksS9bQDt9UvLPqcEkeWUebmaposb8Tx7eTTD8uJVWGRe6PYSsYkg== dependencies: apisauce "^2.1.5" app-module-path "^2.2.0" @@ -4948,6 +5718,11 @@ got@^11.8.5: p-cancelable "^2.0.0" responselike "^2.0.0" +graceful-fs@4.2.10: + version "4.2.10" + resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-4.2.10.tgz#147d3a006da4ca3ce14728c7aefc287c367d7a6c" + integrity 
sha512-9ByhssR2fPVsNZj478qUUbKfmL0+t5BDVyjShtyZZLiK7ZDAArFFfopyOTj0M05wE2tJPisA4iTnnXl2YoPvOA== + graceful-fs@^4.1.2, graceful-fs@^4.1.6, graceful-fs@^4.1.9, graceful-fs@^4.2.0: version "4.2.11" resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-4.2.11.tgz#4183e4e8bf08bb6e05bbb2f7d2e0c8f712ca40e3" @@ -4970,6 +5745,11 @@ graphql@15.5.0: resolved "https://registry.yarnpkg.com/graphql/-/graphql-15.5.0.tgz#39d19494dbe69d1ea719915b578bf920344a69d5" integrity sha512-OmaM7y0kaK31NKG31q4YbD2beNYa6jBBKtMFT6gLYJljHLJr42IqJ8KX08u3Li/0ifzTU5HjmoOOrwa5BRLeDA== +graphql@16.9.0: + version "16.9.0" + resolved "https://registry.yarnpkg.com/graphql/-/graphql-16.9.0.tgz#1c310e63f16a49ce1fbb230bd0a000e99f6f115f" + integrity sha512-GGTKBX4SD7Wdb8mqeDLni2oaRGYQWjWHGKPQ24ZMnUtKfcsVoiv4uX8+LJr1K6U5VW2Lu1BwJnj7uiori0YtRw== + graphql@^15.3.0: version "15.8.0" resolved "https://registry.yarnpkg.com/graphql/-/graphql-15.8.0.tgz#33410e96b012fa3bdb1091cc99a94769db212b38" @@ -5059,6 +5839,11 @@ hash.js@1.1.7, hash.js@^1.0.0, hash.js@^1.0.3, hash.js@^1.1.7: inherits "^2.0.3" minimalistic-assert "^1.0.1" +hashlru@^2.3.0: + version "2.3.0" + resolved "https://registry.yarnpkg.com/hashlru/-/hashlru-2.3.0.tgz#5dc15928b3f6961a2056416bb3a4910216fdfb51" + integrity sha512-0cMsjjIC8I+D3M44pOQdsy0OHXGLVz6Z0beRuufhKa0KfaD2wGwAev6jILzXsd3/vpnNQJmWyZtIILqM1N+n5A== + hasown@^2.0.0: version "2.0.1" resolved "https://registry.yarnpkg.com/hasown/-/hasown-2.0.1.tgz#26f48f039de2c0f8d3356c223fb8d50253519faa" @@ -5136,6 +5921,18 @@ http-cache-semantics@^4.0.0: resolved "https://registry.yarnpkg.com/http-cache-semantics/-/http-cache-semantics-4.1.1.tgz#abe02fcb2985460bf0323be664436ec3476a6d5a" integrity sha512-er295DKPVsV82j5kw1Gjt+ADA/XYHsajl82cGNQG2eyoPkvgUhX+nDIyelzhIWbbsXP39EHcI6l5tYs2FYqYXQ== +http-call@^5.2.2: + version "5.3.0" + resolved "https://registry.yarnpkg.com/http-call/-/http-call-5.3.0.tgz#4ded815b13f423de176eb0942d69c43b25b148db" + integrity 
sha512-ahwimsC23ICE4kPl9xTBjKB4inbRaeLyZeRunC/1Jy/Z6X8tv22MEAjK+KBOMSVLaqXPTTmd8638waVIKLGx2w== + dependencies: + content-type "^1.0.4" + debug "^4.1.1" + is-retry-allowed "^1.1.0" + is-stream "^2.0.0" + parse-json "^4.0.0" + tunnel-agent "^0.6.0" + http-errors@2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/http-errors/-/http-errors-2.0.0.tgz#b7774a1486ef73cf7667ac9ae0858c012c57b9d3" @@ -5199,7 +5996,7 @@ hyperlinker@^1.0.0: resolved "https://registry.yarnpkg.com/hyperlinker/-/hyperlinker-1.0.0.tgz#23dc9e38a206b208ee49bc2d6c8ef47027df0c0e" integrity sha512-Ty8UblRWFEcfSuIaajM34LdPXIhbs1ajEX/BBPv24J+enSVaEVY63xQ6lTO9VRYS5LAoghIG0IDJ+p+IPzKUQQ== -iconv-lite@0.4.24: +iconv-lite@0.4.24, iconv-lite@^0.4.24: version "0.4.24" resolved "https://registry.yarnpkg.com/iconv-lite/-/iconv-lite-0.4.24.tgz#2022b4b25fbddc21d2f524974a474aafe733908b" integrity sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA== @@ -5240,6 +6037,11 @@ immutable@4.2.1: resolved "https://registry.yarnpkg.com/immutable/-/immutable-4.2.1.tgz#8a4025691018c560a40c67e43d698f816edc44d4" integrity sha512-7WYV7Q5BTs0nlQm7tl92rDYYoyELLKHoDMBKhrxEoiV4mrfVdRz8hzPiYOzH7yWjzoVEamxRuAqhxL2PLRwZYQ== +immutable@5.0.3: + version "5.0.3" + resolved "https://registry.yarnpkg.com/immutable/-/immutable-5.0.3.tgz#aa037e2313ea7b5d400cd9298fa14e404c933db1" + integrity sha512-P8IdPQHq3lA1xVeBRi5VPqUm5HDgKnx0Ru51wZz5mjxHr5n3RWhjIpOFU7ybkUxfB+5IToy+OLaHYDBIWsv+uw== + import-fresh@^3.1.0, import-fresh@^3.2.1: version "3.3.0" resolved "https://registry.yarnpkg.com/import-fresh/-/import-fresh-3.3.0.tgz#37162c25fcb9ebaa2e6e53d5b4d88ce17d9e0c2b" @@ -5266,6 +6068,11 @@ inherits@2, inherits@2.0.4, inherits@^2.0.1, inherits@^2.0.3, inherits@^2.0.4, i resolved "https://registry.yarnpkg.com/inherits/-/inherits-2.0.4.tgz#0fa2c64f932917c3433a0ded55363aae37416b7c" integrity sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ== +ini@^1.3.4: + 
version "1.3.8" + resolved "https://registry.yarnpkg.com/ini/-/ini-1.3.8.tgz#a29da425b48806f34767a4efce397269af28432c" + integrity sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew== + interface-datastore@^6.0.2: version "6.1.1" resolved "https://registry.yarnpkg.com/interface-datastore/-/interface-datastore-6.1.1.tgz#5150a00de2e7513eaadba58bcafd059cb50004c1" @@ -5275,11 +6082,24 @@ interface-datastore@^6.0.2: nanoid "^3.0.2" uint8arrays "^3.0.0" +interface-datastore@^8.3.1: + version "8.3.1" + resolved "https://registry.yarnpkg.com/interface-datastore/-/interface-datastore-8.3.1.tgz#c793f990c5cf078a24a8a2ded13f7e2099a2a282" + integrity sha512-3r0ETmHIi6HmvM5sc09QQiCD3gUfwtEM/AAChOyAd/UAKT69uk8LXfTSUBufbUIO/dU65Vj8nb9O6QjwW8vDSQ== + dependencies: + interface-store "^6.0.0" + uint8arrays "^5.1.0" + interface-store@^2.0.2: version "2.0.2" resolved "https://registry.yarnpkg.com/interface-store/-/interface-store-2.0.2.tgz#83175fd2b0c501585ed96db54bb8ba9d55fce34c" integrity sha512-rScRlhDcz6k199EkHqT8NpM87ebN89ICOzILoBHgaG36/WX50N32BnU/kpZgCGPLhARRAWUUX5/cyaIjt7Kipg== +interface-store@^6.0.0: + version "6.0.2" + resolved "https://registry.yarnpkg.com/interface-store/-/interface-store-6.0.2.tgz#1746a1ee07634f7678b3aa778738b79e3f75c909" + integrity sha512-KSFCXtBlNoG0hzwNa0RmhHtrdhzexp+S+UY2s0rWTBJyfdEIgn6i6Zl9otVqrcFYbYrneBT7hbmHQ8gE0C3umA== + invariant@^2.2.2: version "2.2.4" resolved "https://registry.yarnpkg.com/invariant/-/invariant-2.2.4.tgz#610f3c92c9359ce1db616e538008d23ff35158e6" @@ -5362,6 +6182,14 @@ ipfs-http-client@55.0.0: stream-to-it "^0.2.2" uint8arrays "^3.0.0" +ipfs-unixfs@^11.1.4: + version "11.2.0" + resolved "https://registry.yarnpkg.com/ipfs-unixfs/-/ipfs-unixfs-11.2.0.tgz#a7f3d1f9bce29033f273bda124a0eb8bc0c752f6" + integrity sha512-J8FN1qM5nfrDo8sQKQwfj0+brTg1uBfZK2vY9hxci33lcl3BFrsELS9+1+4q/8tO1ASKfxZO8W3Pi2O4sVX2Lg== + dependencies: + protons-runtime "^5.5.0" + uint8arraylist "^2.4.8" + 
ipfs-unixfs@^6.0.3: version "6.0.9" resolved "https://registry.yarnpkg.com/ipfs-unixfs/-/ipfs-unixfs-6.0.9.tgz#f6613b8e081d83faa43ed96e016a694c615a9374" @@ -5429,11 +6257,16 @@ is-core-module@^2.13.0: dependencies: hasown "^2.0.0" -is-docker@^2.0.0: +is-docker@^2.0.0, is-docker@^2.1.1: version "2.2.1" resolved "https://registry.yarnpkg.com/is-docker/-/is-docker-2.2.1.tgz#33eeabe23cfe86f14bde4408a02c0cfb853acdaa" integrity sha512-F+i2BKsFrH66iaUFc0woD8sLy8getkwTwtOBjvs56Cx4CgJDeKQeqfz8wAYiSb8JOprWhHH5p77PbmYCvvUuXQ== +is-docker@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/is-docker/-/is-docker-3.0.0.tgz#90093aa3106277d8a77a5910dbae71747e15a200" + integrity sha512-eljcgEDlEns/7AXFosB5K/2nCM4P7FQPkGc/DWLy5rmFEWvZayGrik1d9/QIY5nJ4f9YsVvBkA6kJpHn9rISdQ== + is-electron@^2.2.0: version "2.2.2" resolved "https://registry.yarnpkg.com/is-electron/-/is-electron-2.2.2.tgz#3778902a2044d76de98036f5dc58089ac4d80bb9" @@ -5495,6 +6328,13 @@ is-hex-prefixed@1.0.0: resolved "https://registry.yarnpkg.com/is-hex-prefixed/-/is-hex-prefixed-1.0.0.tgz#7d8d37e6ad77e5d127148913c573e082d777f554" integrity sha512-WvtOiug1VFrE9v1Cydwm+FnXd3+w9GaeVUss5W4v/SLy3UW00vP+6iNF2SdnfiBoLy4bTqVdkftNGTUeOFVsbA== +is-inside-container@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/is-inside-container/-/is-inside-container-1.0.0.tgz#e81fba699662eb31dbdaf26766a61d4814717ea4" + integrity sha512-KIYLCCJghfHZxqjYBE7rEy0OBuTd5xCHS7tHVgvCLkx7StIoaxwNW3hCALgEUjFfeRk+MG/Qxmp/vtETEF3tRA== + dependencies: + is-docker "^3.0.0" + is-interactive@^1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/is-interactive/-/is-interactive-1.0.0.tgz#cea6e6ae5c870a7b0a0004070b7b587e0252912e" @@ -5529,6 +6369,11 @@ is-plain-obj@^2.1.0: resolved "https://registry.yarnpkg.com/is-plain-obj/-/is-plain-obj-2.1.0.tgz#45e42e37fccf1f40da8e5f76ee21515840c09287" integrity sha512-YWnfyRwxL/+SsrWYfOpUtz5b3YD+nyfkHvjbcanzk8zgyO4ASD67uVMRt8k5bM4lLMDnXfriRhOpemw+NfT1eA== +is-retry-allowed@^1.1.0: 
+ version "1.2.0" + resolved "https://registry.yarnpkg.com/is-retry-allowed/-/is-retry-allowed-1.2.0.tgz#d778488bd0a4666a3be8a1482b9f2baafedea8b4" + integrity sha512-RUbUeKwvm3XG2VYamhJL1xFktgjvPzL0Hq8C+6yrWIswDy3BIXGqCxhxkc30N9jqK311gVU137K8Ei55/zVJRg== + is-stream@^1.0.1: version "1.1.0" resolved "https://registry.yarnpkg.com/is-stream/-/is-stream-1.1.0.tgz#12d4a3dd4e68e0b79ceb8dbc84173ae80d91ca44" @@ -5575,6 +6420,13 @@ is-wsl@^2.2.0: dependencies: is-docker "^2.0.0" +is-wsl@^3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/is-wsl/-/is-wsl-3.1.0.tgz#e1c657e39c10090afcbedec61720f6b924c3cbd2" + integrity sha512-UcVfVfaK4Sc4m7X3dUSoHoozQGBEFeDC+zVo06t98xe8CzHSZZBekNXH+tu0NalHolcJ/QAGqS46Hef7QXBIMw== + dependencies: + is-inside-container "^1.0.0" + isarray@0.0.1: version "0.0.1" resolved "https://registry.yarnpkg.com/isarray/-/isarray-0.0.1.tgz#8a18acfca9a8f4177e09abfc6038939b05d1eedf" @@ -5595,7 +6447,7 @@ isexe@^2.0.0: resolved "https://registry.yarnpkg.com/isexe/-/isexe-2.0.0.tgz#e8fbf374dc556ff8947a10dcb0572d633f2cfa10" integrity sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw== -iso-url@^1.1.5: +iso-url@^1.1.5, iso-url@^1.2.1: version "1.2.1" resolved "https://registry.yarnpkg.com/iso-url/-/iso-url-1.2.1.tgz#db96a49d8d9a64a1c889fc07cc525d093afb1811" integrity sha512-9JPDgCN4B7QPkLtYAAOrEuAWvP9rWvR5offAr0/SeF046wIkglqH3VXgYYP6NcsKslH80UIVgmPqNe3j7tG2ng== @@ -5615,11 +6467,21 @@ it-all@^1.0.4: resolved "https://registry.yarnpkg.com/it-all/-/it-all-1.0.6.tgz#852557355367606295c4c3b7eff0136f07749335" integrity sha512-3cmCc6Heqe3uWi3CVM/k51fa/XbMFpQVzFoDsV0IZNHSQDyAXl3c4MjHkFX5kF3922OGj7Myv1nSEUgRtcuM1A== +it-all@^3.0.4: + version "3.0.6" + resolved "https://registry.yarnpkg.com/it-all/-/it-all-3.0.6.tgz#30a4f922ae9ca0945b0f720d3478ae6f5b6707ab" + integrity sha512-HXZWbxCgQZJfrv5rXvaVeaayXED8nTKx9tj9fpBhmcUJcedVZshMMMqTj0RG2+scGypb9Ut1zd1ifbf3lA8L+Q== + it-first@^1.0.6: version "1.0.7" resolved 
"https://registry.yarnpkg.com/it-first/-/it-first-1.0.7.tgz#a4bef40da8be21667f7d23e44dae652f5ccd7ab1" integrity sha512-nvJKZoBpZD/6Rtde6FXqwDqDZGF1sCADmr2Zoc0hZsIvnE449gRFnGctxDf09Bzc/FWnHXAdaHVIetY6lrE0/g== +it-first@^3.0.4: + version "3.0.6" + resolved "https://registry.yarnpkg.com/it-first/-/it-first-3.0.6.tgz#f532f0f36fe9bf0c291e0162b9d3375d59fe8f05" + integrity sha512-ExIewyK9kXKNAplg2GMeWfgjUcfC1FnUXz/RPfAvIXby+w7U4b3//5Lic0NV03gXT8O/isj5Nmp6KiY0d45pIQ== + it-glob@^1.0.1: version "1.0.2" resolved "https://registry.yarnpkg.com/it-glob/-/it-glob-1.0.2.tgz#bab9b04d6aaac42884502f3a0bfee84c7a29e15e" @@ -5628,21 +6490,57 @@ it-glob@^1.0.1: "@types/minimatch" "^3.0.4" minimatch "^3.0.4" +it-glob@^3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/it-glob/-/it-glob-3.0.1.tgz#b30723a365e5564273ded2d030d61e15578ba5e2" + integrity sha512-IUWE9f6XVUJLugK7pQmQPqTWj4BiQJhufnvxfsCmNIGEDQEkKVs3Ld9gFZq/Vude6g/OpndhsiuFrA730Bc59A== + dependencies: + fast-glob "^3.3.2" + it-last@^1.0.4: version "1.0.6" resolved "https://registry.yarnpkg.com/it-last/-/it-last-1.0.6.tgz#4106232e5905ec11e16de15a0e9f7037eaecfc45" integrity sha512-aFGeibeiX/lM4bX3JY0OkVCFkAw8+n9lkukkLNivbJRvNz8lI3YXv5xcqhFUV2lDJiraEK3OXRDbGuevnnR67Q== +it-last@^3.0.4: + version "3.0.6" + resolved "https://registry.yarnpkg.com/it-last/-/it-last-3.0.6.tgz#53b1463e47fcaa950375968002598686101de6ab" + integrity sha512-M4/get95O85u2vWvWQinF8SJUc/RPC5bWTveBTYXvlP2q5TF9Y+QhT3nz+CRCyS2YEc66VJkyl/da6WrJ0wKhw== + it-map@^1.0.4: version "1.0.6" resolved "https://registry.yarnpkg.com/it-map/-/it-map-1.0.6.tgz#6aa547e363eedcf8d4f69d8484b450bc13c9882c" integrity sha512-XT4/RM6UHIFG9IobGlQPFQUrlEKkU4eBUFG3qhWhfAdh1JfF2x11ShCrKCdmZ0OiZppPfoLuzcfA4cey6q3UAQ== +it-map@^3.0.5: + version "3.1.1" + resolved "https://registry.yarnpkg.com/it-map/-/it-map-3.1.1.tgz#637877e93be93a7aa7d7fc103b70a5939fc6f7a1" + integrity sha512-9bCSwKD1yN1wCOgJ9UOl+46NQtdatosPWzxxUk2NdTLwRPXLh+L7iwCC9QKsbgM60RQxT/nH8bKMqm3H/o8IHQ== + 
dependencies: + it-peekable "^3.0.0" + it-peekable@^1.0.2: version "1.0.3" resolved "https://registry.yarnpkg.com/it-peekable/-/it-peekable-1.0.3.tgz#8ebe933767d9c5aa0ae4ef8e9cb3a47389bced8c" integrity sha512-5+8zemFS+wSfIkSZyf0Zh5kNN+iGyccN02914BY4w/Dj+uoFEoPSvj5vaWn8pNZJNSxzjW0zHRxC3LUb2KWJTQ== +it-peekable@^3.0.0, it-peekable@^3.0.3: + version "3.0.5" + resolved "https://registry.yarnpkg.com/it-peekable/-/it-peekable-3.0.5.tgz#63b0c750e27e2ba0c1db6d6a3496b7ef51a6547d" + integrity sha512-JWQOGMt6rKiPcY30zUVMR4g6YxkpueTwHVE7CMs/aGqCf4OydM6w+7ZM3PvmO1e0TocjuR4aL8xyZWR46cTqCQ== + +it-pushable@^3.2.3: + version "3.2.3" + resolved "https://registry.yarnpkg.com/it-pushable/-/it-pushable-3.2.3.tgz#e2b80aed90cfbcd54b620c0a0785e546d4e5f334" + integrity sha512-gzYnXYK8Y5t5b/BnJUr7glfQLO4U5vyb05gPx/TyTw+4Bv1zM9gFk4YsOrnulWefMewlphCjKkakFvj1y99Tcg== + dependencies: + p-defer "^4.0.0" + +it-stream-types@^2.0.1, it-stream-types@^2.0.2: + version "2.0.2" + resolved "https://registry.yarnpkg.com/it-stream-types/-/it-stream-types-2.0.2.tgz#60bbace90096796b4e6cc3bfab99cf9f2b86c152" + integrity sha512-Rz/DEZ6Byn/r9+/SBCuJhpPATDF9D+dz5pbgSUyBsCDtza6wtNATrz/jz1gDyNanC3XdLboriHnOC925bZRBww== + it-to-stream@^1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/it-to-stream/-/it-to-stream-1.0.0.tgz#6c47f91d5b5df28bda9334c52782ef8e97fe3a4a" @@ -5655,6 +6553,13 @@ it-to-stream@^1.0.0: p-fifo "^1.0.0" readable-stream "^3.6.0" +jackspeak@^4.0.1: + version "4.0.2" + resolved "https://registry.yarnpkg.com/jackspeak/-/jackspeak-4.0.2.tgz#11f9468a3730c6ff6f56823a820d7e3be9bef015" + integrity sha512-bZsjR/iRjl1Nk1UkjGpAzLNfQtzuijhn2g+pbZb98HQ1Gk8vM9hfbxeMBP+M2/UUdwj0RqGG3mlvk2MsAqwvEw== + dependencies: + "@isaacs/cliui" "^8.0.2" + jake@^10.8.5: version "10.8.7" resolved "https://registry.yarnpkg.com/jake/-/jake-10.8.7.tgz#63a32821177940c33f356e0ba44ff9d34e1c7d8f" @@ -5683,6 +6588,24 @@ jayson@4.0.0: uuid "^8.3.2" ws "^7.4.5" +jayson@4.1.3: + version "4.1.3" + resolved 
"https://registry.yarnpkg.com/jayson/-/jayson-4.1.3.tgz#db9be2e4287d9fef4fc05b5fe367abe792c2eee8" + integrity sha512-LtXh5aYZodBZ9Fc3j6f2w+MTNcnxteMOrb+QgIouguGOulWi0lieEkOUg+HkjjFs0DGoWDds6bi4E9hpNFLulQ== + dependencies: + "@types/connect" "^3.4.33" + "@types/node" "^12.12.54" + "@types/ws" "^7.4.4" + JSONStream "^1.3.5" + commander "^2.20.3" + delay "^5.0.0" + es6-promisify "^5.0.0" + eyes "^0.1.8" + isomorphic-ws "^4.0.1" + json-stringify-safe "^5.0.1" + uuid "^8.3.2" + ws "^7.5.10" + js-sha3@0.5.7, js-sha3@^0.5.7: version "0.5.7" resolved "https://registry.yarnpkg.com/js-sha3/-/js-sha3-0.5.7.tgz#0d4ffd8002d5333aabaf4a23eed2f6374c9f28e7" @@ -5733,6 +6656,11 @@ json-buffer@3.0.1: resolved "https://registry.yarnpkg.com/json-buffer/-/json-buffer-3.0.1.tgz#9338802a30d3b6605fbe0613e094008ca8c05a13" integrity sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ== +json-parse-better-errors@^1.0.1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/json-parse-better-errors/-/json-parse-better-errors-1.0.2.tgz#bb867cfb3450e69107c131d1c514bab3dc8bcaa9" + integrity sha512-mrqyZKfX5EhL7hvqcV6WG1yYjnjeuYDzDhhcAAUrq8Po85NBQBJP+ZDUT75qZQ98IkUoBqdkExkukOU7Ts2wrw== + json-parse-even-better-errors@^2.3.0: version "2.3.1" resolved "https://registry.yarnpkg.com/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz#7c47805a94319928e05777405dc12e1f7a4ee02d" @@ -5873,6 +6801,45 @@ klaw@^1.0.0: optionalDependencies: graceful-fs "^4.1.9" +kubo-rpc-client@^5.0.2: + version "5.0.2" + resolved "https://registry.yarnpkg.com/kubo-rpc-client/-/kubo-rpc-client-5.0.2.tgz#17d635547139ab2e697bc4ec33bf3c8382209dd2" + integrity sha512-0w8VUwpxtkynLlJsAnM+es3qR6Nvv0/oqg0I+sCgI65rh8OPoBYpsk58/miD+u/OkIhJKBwslfeJ9y7Ujb40+g== + dependencies: + "@ipld/dag-cbor" "^9.0.0" + "@ipld/dag-json" "^10.0.0" + "@ipld/dag-pb" "^4.0.0" + "@libp2p/crypto" "^5.0.0" + "@libp2p/interface" "^2.0.0" + "@libp2p/logger" "^5.0.0" + "@libp2p/peer-id" "^5.0.0" 
+ "@multiformats/multiaddr" "^12.2.1" + "@multiformats/multiaddr-to-uri" "^10.0.1" + any-signal "^4.1.1" + blob-to-it "^2.0.5" + browser-readablestream-to-it "^2.0.5" + dag-jose "^5.0.0" + electron-fetch "^1.9.1" + err-code "^3.0.1" + ipfs-unixfs "^11.1.4" + iso-url "^1.2.1" + it-all "^3.0.4" + it-first "^3.0.4" + it-glob "^3.0.1" + it-last "^3.0.4" + it-map "^3.0.5" + it-peekable "^3.0.3" + it-to-stream "^1.0.0" + merge-options "^3.0.4" + multiformats "^13.1.0" + nanoid "^5.0.7" + native-fetch "^4.0.2" + parse-duration "^1.0.2" + react-native-fetch-api "^3.0.0" + stream-to-it "^1.0.1" + uint8arrays "^5.0.3" + wherearewe "^2.0.1" + lcid@^1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/lcid/-/lcid-1.0.0.tgz#308accafa0bc483a3867b4b6f2b9506251d1b835" @@ -6053,6 +7020,11 @@ levelup@^1.2.1: semver "~5.4.1" xtend "~4.0.0" +lilconfig@^3.1.2, lilconfig@^3.1.3: + version "3.1.3" + resolved "https://registry.yarnpkg.com/lilconfig/-/lilconfig-3.1.3.tgz#a1bcfd6257f9585bf5ae14ceeebb7b559025e4c4" + integrity sha512-/vlFKAoH5Cgt3Ie+JLhRbwOsCQePABiU3tJ1egGvyQ+33R/vcwM2Zl2QR/LzjsBeItPt3oSVXapn+m4nQDvpzw== + lines-and-columns@^1.1.6: version "1.2.4" resolved "https://registry.yarnpkg.com/lines-and-columns/-/lines-and-columns-1.2.4.tgz#eca284f75d2965079309dc0ad9255abb2ebc1632" @@ -6266,6 +7238,11 @@ lowercase-keys@^3.0.0: resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-7.13.1.tgz#267a81fbd0881327c46a81c5922606a2cfe336c4" integrity sha512-CHqbAq7NFlW3RSnoWXLJBxCWaZVBrfa9UEHId2M3AW8iEBurbqduNexEUCGc3SHc6iCYXNJCDi903LajSVAEPQ== +lru-cache@^11.0.0: + version "11.0.2" + resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-11.0.2.tgz#fbd8e7cf8211f5e7e5d91905c415a3f55755ca39" + integrity sha512-123qHRfJBmo2jXDbo/a5YOQrJoHF/GNQTLzQ5+IdK5pWpceK17yRc6ozlWd25FxvGKQbIUs91fDFkXmDHTKcyA== + lru-cache@^5.1.1: version "5.1.1" resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-5.1.1.tgz#1da27e6710271947695daf6848e847f01d84b920" @@ -6380,6 +7357,14 @@ 
micromatch@^4.0.4: braces "^3.0.2" picomatch "^2.3.1" +micromatch@^4.0.8: + version "4.0.8" + resolved "https://registry.yarnpkg.com/micromatch/-/micromatch-4.0.8.tgz#d66fa18f3a47076789320b9b1af32bd86d9fa202" + integrity sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA== + dependencies: + braces "^3.0.3" + picomatch "^2.3.1" + mime-db@1.52.0: version "1.52.0" resolved "https://registry.yarnpkg.com/mime-db/-/mime-db-1.52.0.tgz#bbabcdc02859f4987301c856e3387ce5ec43bf70" @@ -6441,6 +7426,13 @@ minimatch@5.0.1: dependencies: brace-expansion "^2.0.1" +minimatch@^10.0.0: + version "10.0.1" + resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-10.0.1.tgz#ce0521856b453c86e25f2c4c0d03e6ff7ddc440b" + integrity sha512-ethXTt3SGGR+95gudmqJ1eNhRO7eGEGIgYA9vnPatK4/etz2MEVDno5GMCibdMTuBMyElzIlgxMna3K94XDIDQ== + dependencies: + brace-expansion "^2.0.1" + minimatch@^3.0.2, minimatch@^3.0.4, minimatch@^3.1.1, minimatch@^3.1.2: version "3.1.2" resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-3.1.2.tgz#19cd194bfd3e428f049a70817c038d89ab4be35b" @@ -6462,6 +7454,13 @@ minimatch@^8.0.2: dependencies: brace-expansion "^2.0.1" +minimatch@^9.0.5: + version "9.0.5" + resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-9.0.5.tgz#d74f9dd6b57d83d8e98cfb82133b03978bc929e5" + integrity sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow== + dependencies: + brace-expansion "^2.0.1" + minimist@^1.2.6: version "1.2.8" resolved "https://registry.yarnpkg.com/minimist/-/minimist-1.2.8.tgz#c1a464e7693302e082a075cee0c057741ac4772c" @@ -6497,6 +7496,11 @@ minipass@^5.0.0: resolved "https://registry.yarnpkg.com/minipass/-/minipass-7.0.4.tgz#dbce03740f50a4786ba994c1fb908844d27b038c" integrity sha512-jYofLM5Dam9279rdkWzqHozUo4ybjdZmCsDHePy5V/PbBcVMiSZR97gmAy45aqi8CK1lG2ECd356FU86avfwUQ== +minipass@^7.1.2: + version "7.1.2" + resolved 
"https://registry.yarnpkg.com/minipass/-/minipass-7.1.2.tgz#93a9626ce5e5e66bd4db86849e7515e92340a707" + integrity sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw== + minizlib@^1.3.3: version "1.3.3" resolved "https://registry.yarnpkg.com/minizlib/-/minizlib-1.3.3.tgz#2290de96818a34c29551c8a8d301216bd65a861d" @@ -6583,11 +7587,16 @@ ms@2.1.2: resolved "https://registry.yarnpkg.com/ms/-/ms-2.1.2.tgz#d09d1f357b443f493382a8eb3ccd183872ae6009" integrity sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w== -ms@2.1.3, ms@^2.1.1: +ms@2.1.3, ms@^2.1.1, ms@^2.1.3: version "2.1.3" resolved "https://registry.yarnpkg.com/ms/-/ms-2.1.3.tgz#574c8138ce1d2b5861f0b44579dbadd60c6615b2" integrity sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA== +ms@^3.0.0-canary.1: + version "3.0.0-canary.1" + resolved "https://registry.yarnpkg.com/ms/-/ms-3.0.0-canary.1.tgz#c7b34fbce381492fd0b345d1cf56e14d67b77b80" + integrity sha512-kh8ARjh8rMN7Du2igDRO9QJnqCb2xYTJxyQYK7vJJS4TvLLmsbyhiKpSW+t+y26gyOyMd0riphX0GeWKU3ky5g== + multiaddr-to-uri@^8.0.0: version "8.0.0" resolved "https://registry.yarnpkg.com/multiaddr-to-uri/-/multiaddr-to-uri-8.0.0.tgz#65efe4b1f9de5f6b681aa42ff36a7c8db7625e58" @@ -6638,11 +7647,21 @@ multicodec@^1.0.0: buffer "^5.6.0" varint "^5.0.0" +multiformats@^13.0.0, multiformats@^13.1.0, multiformats@^13.3.1: + version "13.3.1" + resolved "https://registry.yarnpkg.com/multiformats/-/multiformats-13.3.1.tgz#ea30d134b5697dcf2036ac819a17948f8a1775be" + integrity sha512-QxowxTNwJ3r5RMctoGA5p13w5RbRT2QDkoM+yFlqfLiioBp78nhDjnRLvmSBI9+KAqN4VdgOVWM9c0CHd86m3g== + multiformats@^9.4.13, multiformats@^9.4.2, multiformats@^9.4.5, multiformats@^9.5.4: version "9.9.0" resolved "https://registry.yarnpkg.com/multiformats/-/multiformats-9.9.0.tgz#c68354e7d21037a8f1f8833c8ccd68618e8f1d37" integrity 
sha512-HoMUjhH9T8DDBNT+6xzkrd9ga/XiBI4xLr58LJACwK6G3HTOPeMz4nB4KJs33L2BelrIJa7P0VuNaVF3hMYfjg== +multiformats@~13.1.3: + version "13.1.3" + resolved "https://registry.yarnpkg.com/multiformats/-/multiformats-13.1.3.tgz#36d312401ff424948ef90746fbda9dd798cffa09" + integrity sha512-CZPi9lFZCM/+7oRolWYsvalsyWQGFo+GpdaTmjxXXomC+nP/W1Rnxb9sUgjvmNmRZ5bOPqRAl4nuK+Ydw/4tGw== + multihashes@^0.4.15, multihashes@~0.4.15: version "0.4.21" resolved "https://registry.yarnpkg.com/multihashes/-/multihashes-0.4.21.tgz#dc02d525579f334a7909ade8a122dabb58ccfcb5" @@ -6657,6 +7676,11 @@ mute-stream@0.0.8: resolved "https://registry.yarnpkg.com/mute-stream/-/mute-stream-0.0.8.tgz#1630c42b2251ff81e2a283de96a5497ea92e5e0d" integrity sha512-nnbWWOkoWyUsTjKrhgD0dcz22mdkSnpYqbEjIm2nhwhuxlSkpywJmBo8h0ZqJdkp73mb90SssHkN4rsRaBAfAA== +mute-stream@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/mute-stream/-/mute-stream-2.0.0.tgz#a5446fc0c512b71c83c44d908d5c7b7b4c493b2b" + integrity sha512-WWdIxpyjEn+FhQJQQv9aQAYlHoNVdzIzUySNV1gHUPDSdZJ3yZn7pAAbQcV7B56Mvu881q9FZV+0Vx2xC44VWA== + nano-base32@^1.0.1: version "1.0.1" resolved "https://registry.yarnpkg.com/nano-base32/-/nano-base32-1.0.1.tgz#ba548c879efcfb90da1c4d9e097db4a46c9255ef" @@ -6677,6 +7701,11 @@ nanoid@^3.0.2, nanoid@^3.1.20, nanoid@^3.1.23: resolved "https://registry.yarnpkg.com/nanoid/-/nanoid-3.3.7.tgz#d0c301a691bc8d54efa0a2226ccf3fe2fd656bd8" integrity sha512-eSRppjcPIatRIMC1U6UngP8XFcz8MQWGQdt1MTBQ7NaAmvXDfvNxbvWV3x2y6CdEUciCSsDHDQZbhYaB8QEo2g== +nanoid@^5.0.7: + version "5.0.9" + resolved "https://registry.yarnpkg.com/nanoid/-/nanoid-5.0.9.tgz#977dcbaac055430ce7b1e19cf0130cea91a20e50" + integrity sha512-Aooyr6MXU6HpvvWXKoVoXwKMs/KyVakWwg7xQfv5/S/RIgJMy0Ifa45H9qqYy7pTCszrHzP21Uk4PZq2HpEM8Q== + napi-macros@~2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/napi-macros/-/napi-macros-2.0.0.tgz#2b6bae421e7b96eb687aa6c77a7858640670001b" @@ -6692,6 +7721,11 @@ native-fetch@^3.0.0: resolved 
"https://registry.yarnpkg.com/native-fetch/-/native-fetch-3.0.0.tgz#06ccdd70e79e171c365c75117959cf4fe14a09bb" integrity sha512-G3Z7vx0IFb/FQ4JxvtqGABsOTIqRWvgQz6e+erkB+JJD6LrszQtMozEHI4EkmgZQvnGHrpLVzUWk7t4sJCIkVw== +native-fetch@^4.0.2: + version "4.0.2" + resolved "https://registry.yarnpkg.com/native-fetch/-/native-fetch-4.0.2.tgz#75c8a44c5f3bb021713e5e24f2846750883e49af" + integrity sha512-4QcVlKFtv2EYVS5MBgsGX5+NWKtbDbIECdUXDBGDMAZXq3Jkv9zf+y8iS7Ub8fEdga3GpYeazp9gauNqXHJOCg== + natural-orderby@^2.0.3: version "2.0.3" resolved "https://registry.yarnpkg.com/natural-orderby/-/natural-orderby-2.0.3.tgz#8623bc518ba162f8ff1cdb8941d74deb0fdcc016" @@ -6883,6 +7917,25 @@ onetime@^5.1.0, onetime@^5.1.2: dependencies: mimic-fn "^2.1.0" +open@10.1.0: + version "10.1.0" + resolved "https://registry.yarnpkg.com/open/-/open-10.1.0.tgz#a7795e6e5d519abe4286d9937bb24b51122598e1" + integrity sha512-mnkeQ1qP5Ue2wd+aivTD3NHd/lZ96Lu0jgf0pwktLPtx6cTZiH7tyeGRRHs0zX0rbrahXPnXlUnbeXyaBBuIaw== + dependencies: + default-browser "^5.2.1" + define-lazy-prop "^3.0.0" + is-inside-container "^1.0.0" + is-wsl "^3.1.0" + +open@8.4.2: + version "8.4.2" + resolved "https://registry.yarnpkg.com/open/-/open-8.4.2.tgz#5b5ffe2a8f793dcd2aad73e550cb87b59cb084f9" + integrity sha512-7x81NCL719oNbsq/3mh+hVrAWmFuEYUqrq/Iw3kUzH8ReypT9QQ0BLoJS7/G9k6N81XjW4qHWtjWwe/9eLy1EQ== + dependencies: + define-lazy-prop "^2.0.0" + is-docker "^2.1.1" + is-wsl "^2.2.0" + ora@4.0.2: version "4.0.2" resolved "https://registry.yarnpkg.com/ora/-/ora-4.0.2.tgz#0e1e68fd45b135d28648b27cf08081fa6e8a297d" @@ -6947,6 +8000,11 @@ p-defer@^3.0.0: resolved "https://registry.yarnpkg.com/p-defer/-/p-defer-3.0.0.tgz#d1dceb4ee9b2b604b1d94ffec83760175d4e6f83" integrity sha512-ugZxsxmtTln604yeYd29EGrNhazN2lywetzpKhfmQjW/VJmhpDmWbiX+h0zL8V91R0UXkhb3KtPmyq9PZw3aYw== +p-defer@^4.0.0: + version "4.0.1" + resolved "https://registry.yarnpkg.com/p-defer/-/p-defer-4.0.1.tgz#d12c6d41420785ed0d162dbd86b71ba490f7f99e" + integrity 
sha512-Mr5KC5efvAK5VUptYEIopP1bakB85k2IWXaRC0rsh1uwn1L6M0LVml8OIQ4Gudg4oyZakf7FmeRLkMMtZW1i5A== + p-fifo@^1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/p-fifo/-/p-fifo-1.0.0.tgz#e29d5cf17c239ba87f51dde98c1d26a9cfe20a63" @@ -7002,6 +8060,19 @@ p-locate@^5.0.0: dependencies: p-limit "^3.0.2" +p-queue@^8.0.1: + version "8.0.1" + resolved "https://registry.yarnpkg.com/p-queue/-/p-queue-8.0.1.tgz#718b7f83836922ef213ddec263ff4223ce70bef8" + integrity sha512-NXzu9aQJTAzbBqOt2hwsR63ea7yvxJc0PwN/zobNAudYfb1B7R08SzB4TsLeSbUCuG467NhnoT0oO6w1qRO+BA== + dependencies: + eventemitter3 "^5.0.1" + p-timeout "^6.1.2" + +p-timeout@^6.1.2: + version "6.1.4" + resolved "https://registry.yarnpkg.com/p-timeout/-/p-timeout-6.1.4.tgz#418e1f4dd833fa96a2e3f532547dd2abdb08dbc2" + integrity sha512-MyIV3ZA/PmyBN/ud8vV9XzwTrNtR4jFrObymZYnZqMmW0zA8Z17vnT0rBgFE/TlohB+YCHqXMgZzb3Csp49vqg== + p-try@^1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/p-try/-/p-try-1.0.0.tgz#cbc79cdbaf8fd4228e13f621f2b1a237c1b207b3" @@ -7012,6 +8083,11 @@ p-try@^2.0.0: resolved "https://registry.yarnpkg.com/p-try/-/p-try-2.2.0.tgz#cb2868540e313d61de58fafbe35ce9004d5540e6" integrity sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ== +package-json-from-dist@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/package-json-from-dist/-/package-json-from-dist-1.0.1.tgz#4f1471a010827a86f94cfd9b0727e36d267de505" + integrity sha512-UEZIS3/by4OC8vL3P2dTXRETpebLI2NiI5vIrjaD/5UtrkFX/tNbwjTSRAGC/+7CAo2pIcBaRgWmcBBHcsaCIw== + param-case@^2.1.0: version "2.1.1" resolved "https://registry.yarnpkg.com/param-case/-/param-case-2.1.1.tgz#df94fd8cf6531ecf75e6bef9a0858fbc72be2247" @@ -7036,6 +8112,11 @@ parse-duration@^1.0.0: resolved "https://registry.yarnpkg.com/parse-duration/-/parse-duration-1.1.0.tgz#5192084c5d8f2a3fd676d04a451dbd2e05a1819c" integrity sha512-z6t9dvSJYaPoQq7quMzdEagSFtpGu+utzHqqxmpVWNNZRIXnvqyCvn9XsTdh7c/w0Bqmdz3RB3YnRaKtpRtEXQ== 
+parse-duration@^1.0.2: + version "1.1.1" + resolved "https://registry.yarnpkg.com/parse-duration/-/parse-duration-1.1.1.tgz#b6b4378e26c352b4e2e8e79c1b7abb3d687e5bd2" + integrity sha512-27m0hKqcGzYFGtrZ1FPSNuAUi1mvqYIUjHHIgYYAc+4wcj7t2o7Qj3X4s7THMOYyeTcFjmKUZu0yJG2oE947bw== + parse-headers@^2.0.0: version "2.0.5" resolved "https://registry.yarnpkg.com/parse-headers/-/parse-headers-2.0.5.tgz#069793f9356a54008571eb7f9761153e6c770da9" @@ -7048,6 +8129,14 @@ parse-json@^2.2.0: dependencies: error-ex "^1.2.0" +parse-json@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/parse-json/-/parse-json-4.0.0.tgz#be35f5425be1f7f6c747184f98a788cb99477ee0" + integrity sha512-aOIos8bujGN93/8Ox/jPLh7RwVnPEysynVFE+fQZyg6jKELEHwzgKdLRFHUgXJL6kylijVSBC4BvN9OmsB48Rw== + dependencies: + error-ex "^1.3.1" + json-parse-better-errors "^1.0.1" + parse-json@^5.0.0: version "5.2.0" resolved "https://registry.yarnpkg.com/parse-json/-/parse-json-5.2.0.tgz#c76fc66dee54231c962b22bcc8a72cf2f99753cd" @@ -7141,6 +8230,14 @@ path-scurry@^1.6.1: lru-cache "^9.1.1 || ^10.0.0" minipass "^5.0.0 || ^6.0.2 || ^7.0.0" +path-scurry@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/path-scurry/-/path-scurry-2.0.0.tgz#9f052289f23ad8bf9397a2a0425e7b8615c58580" + integrity sha512-ypGJsmGtdXUOeM5u93TyeIEfEhM6s+ljAhrk5vAvSx8uyY/02OvrZnA0YNGUrPXfpJMgI1ODd3nwz8Npx4O4cg== + dependencies: + lru-cache "^11.0.0" + minipass "^7.1.2" + path-to-regexp@0.1.7: version "0.1.7" resolved "https://registry.yarnpkg.com/path-to-regexp/-/path-to-regexp-0.1.7.tgz#df604178005f522f15eb4490e7247a1bfaa67f8c" @@ -7421,6 +8518,11 @@ prettier@3.0.3: resolved "https://registry.yarnpkg.com/prettier/-/prettier-3.0.3.tgz#432a51f7ba422d1469096c0fdc28e235db8f9643" integrity sha512-L/4pUDMxcNa8R/EthV08Zt42WBO4h1rarVtK0K+QJG0X187OLo7l699jWw0GKuwzkPQ//jMFA/8Xm6Fh3J/DAg== +prettier@3.4.2: + version "3.4.2" + resolved "https://registry.yarnpkg.com/prettier/-/prettier-3.4.2.tgz#a5ce1fb522a588bf2b78ca44c6e6fe5aa5a2b13f" 
+ integrity sha512-e9MewbtFo+Fevyuxn/4rrcDAaq0IYxPGLvObpQjiZBMAzB9IGmzlnG9RZy3FFas+eBMu2vA0CszMeduow5dIuQ== + private@^0.1.8: version "0.1.8" resolved "https://registry.yarnpkg.com/private/-/private-0.1.8.tgz#2381edb3689f7a53d653190060fcf822d2f368ff" @@ -7436,6 +8538,11 @@ process@^0.11.10: resolved "https://registry.yarnpkg.com/process/-/process-0.11.10.tgz#7332300e840161bda3e69a1d1d91a7d4bc16f182" integrity sha512-cdGef/drWFoydD1JsMzuFf8100nZl+GT+yacc2bEced5f9Rjk4z+WtFUTBu9PhOi9j/jfmBPu0mMEY4wIdAF8A== +progress-events@^1.0.0, progress-events@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/progress-events/-/progress-events-1.0.1.tgz#693b6d4153f08c1418ae3cd5fcad8596c91db7e8" + integrity sha512-MOzLIwhpt64KIVN64h1MwdKWiyKFNc/S6BoYKPIVUHFg0/eIEyBulhWCgn678v/4c0ri3FdGuzXymNCv02MUIw== + promise-to-callback@^1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/promise-to-callback/-/promise-to-callback-1.0.0.tgz#5d2a749010bfb67d963598fcd3960746a68feef7" @@ -7451,6 +8558,11 @@ promise@^8.0.0: dependencies: asap "~2.0.6" +proto-list@~1.2.1: + version "1.2.4" + resolved "https://registry.yarnpkg.com/proto-list/-/proto-list-1.2.4.tgz#212d5bfe1318306a420f6402b8e26ff39647a849" + integrity sha512-vtK/94akxsTMhe0/cbfpR+syPuszcuwhqVjJq26CuNDgFGj682oRBXOP5MJpv2r7JtE8MsiepGIqvvOTBwn2vA== + protobufjs@^6.10.2: version "6.11.4" resolved "https://registry.yarnpkg.com/protobufjs/-/protobufjs-6.11.4.tgz#29a412c38bf70d89e537b6d02d904a6f448173aa" @@ -7470,6 +8582,15 @@ protobufjs@^6.10.2: "@types/node" ">=13.7.0" long "^4.0.0" +protons-runtime@^5.5.0: + version "5.5.0" + resolved "https://registry.yarnpkg.com/protons-runtime/-/protons-runtime-5.5.0.tgz#ea06d9ef843aad77ea5de3e1ebafa81b58c24570" + integrity sha512-EsALjF9QsrEk6gbCx3lmfHxVN0ah7nG3cY7GySD4xf4g8cr7g543zB88Foh897Sr1RQJ9yDCUsoT1i1H/cVUFA== + dependencies: + uint8-varint "^2.0.2" + uint8arraylist "^2.4.3" + uint8arrays "^5.0.1" + proxy-addr@~2.0.7: version "2.0.7" resolved 
"https://registry.yarnpkg.com/proxy-addr/-/proxy-addr-2.0.7.tgz#f19fe69ceab311eeb94b42e70e8c2070f9ba1025" @@ -7677,6 +8798,11 @@ readable-stream@~1.0.15, readable-stream@~1.0.26-4: isarray "0.0.1" string_decoder "~0.10.x" +readdirp@^4.0.1: + version "4.0.2" + resolved "https://registry.yarnpkg.com/readdirp/-/readdirp-4.0.2.tgz#388fccb8b75665da3abffe2d8f8ed59fe74c230a" + integrity sha512-yDMz9g+VaZkqBYS/ozoBJwaBhTbZo3UNYQHNRw1D3UFQB8oHB4uS/tAODO+ZLjGWmUbKnIlOWO+aaIiAxrUWHA== + readdirp@~3.6.0: version "3.6.0" resolved "https://registry.yarnpkg.com/readdirp/-/readdirp-3.6.0.tgz#74a370bd857116e245b29cc97340cd431a02a6c7" @@ -7730,6 +8856,13 @@ regenerator-runtime@^0.14.0: resolved "https://registry.yarnpkg.com/regenerator-runtime/-/regenerator-runtime-0.14.1.tgz#356ade10263f685dda125100cd862c1db895327f" integrity sha512-dYnhHh0nJoMfnkZs6GmmhFknAGRrLznOu5nc9ML+EJxGvrx6H7teuevqVqCuPcPK//3eDrrjQhehXVx9cnkGdw== +registry-auth-token@^5.0.3: + version "5.0.3" + resolved "https://registry.yarnpkg.com/registry-auth-token/-/registry-auth-token-5.0.3.tgz#417d758c8164569de8cf5cabff16cc937902dcc6" + integrity sha512-1bpc9IyC+e+CNFRaWyn77tk4xGG4PPUyfakSmA6F6cvUDjrm58dfyJ3II+9yb10EDkHoy1LaPSmHaWLOH3m6HA== + dependencies: + "@pnpm/npm-conf" "^2.1.0" + repeating@^2.0.0: version "2.0.1" resolved "https://registry.yarnpkg.com/repeating/-/repeating-2.0.1.tgz#5214c53a926d3552707527fbab415dbc08d06dda" @@ -7885,6 +9018,11 @@ rlp@^2.0.0, rlp@^2.2.3, rlp@^2.2.4: dependencies: bn.js "^5.2.0" +run-applescript@^7.0.0: + version "7.0.0" + resolved "https://registry.yarnpkg.com/run-applescript/-/run-applescript-7.0.0.tgz#e5a553c2bffd620e169d276c1cd8f1b64778fbeb" + integrity sha512-9by4Ij99JUr/MCFBUkDKLWK3G9HVXmabKz9U5MlIAIuvuzkiOicRYs8XJLxX+xahD+mLiiCYDqF9dKAgtzKP1A== + run-parallel@^1.1.9: version "1.2.0" resolved "https://registry.yarnpkg.com/run-parallel/-/run-parallel-1.2.0.tgz#66d1368da7bdf921eb9d95bd1a9229e7f21a43ee" @@ -7962,6 +9100,11 @@ semver@7.4.0: dependencies: lru-cache "^6.0.0" 
+semver@7.6.3, semver@^7.6.3: + version "7.6.3" + resolved "https://registry.yarnpkg.com/semver/-/semver-7.6.3.tgz#980f7b5550bc175fb4dc09403085627f9eb33143" + integrity sha512-oVekP1cKtI+CTDvHWYFUcMtsK/00wmAEfyqKfNdARm8u1wNVhSgaX7A8d4UuIlUI5e84iEwOhs7ZPYRmzU9U6A== + semver@^6.3.1: version "6.3.1" resolved "https://registry.yarnpkg.com/semver/-/semver-6.3.1.tgz#556d2ef8689146e46dcea4bfdd095f3434dffcb4" @@ -8118,6 +9261,11 @@ signal-exit@^3.0.2, signal-exit@^3.0.3: resolved "https://registry.yarnpkg.com/signal-exit/-/signal-exit-3.0.7.tgz#a9a1767f8af84155114eaabd73f99273c8f59ad9" integrity sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ== +signal-exit@^4.0.1, signal-exit@^4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/signal-exit/-/signal-exit-4.1.0.tgz#952188c1cbd546070e2dd20d0f41c0ae0530cb04" + integrity sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw== + simple-concat@^1.0.0: version "1.0.1" resolved "https://registry.yarnpkg.com/simple-concat/-/simple-concat-1.0.1.tgz#f46976082ba35c2263f1c8ab5edfe26c41c9552f" @@ -8275,6 +9423,13 @@ stream-to-it@^0.2.2: dependencies: get-iterator "^1.0.2" +stream-to-it@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/stream-to-it/-/stream-to-it-1.0.1.tgz#7d5e1b04bab70facd48273279bfa49f0d0165950" + integrity sha512-AqHYAYPHcmvMrcLNgncE/q0Aj/ajP6A4qGhxP6EVn7K3YTNs0bJpJyk57wc2Heb7MUL64jurvmnmui8D9kjZgA== + dependencies: + it-stream-types "^2.0.1" + streamsearch@^1.1.0: version "1.1.0" resolved "https://registry.yarnpkg.com/streamsearch/-/streamsearch-1.1.0.tgz#404dd1e2247ca94af554e841a8ef0eaa238da764" @@ -8285,6 +9440,15 @@ strict-uri-encode@^1.0.0: resolved "https://registry.yarnpkg.com/strict-uri-encode/-/strict-uri-encode-1.1.0.tgz#279b225df1d582b1f54e65addd4352e18faa0713" integrity sha512-R3f198pcvnB+5IpnBlRkphuE9n46WyVl8I39W/ZUTZLz4nqSP/oLYUrcnJrw462Ds8he4YKMov2efsTIw1BDGQ== 
+"string-width-cjs@npm:string-width@^4.2.0": + version "4.2.3" + resolved "https://registry.yarnpkg.com/string-width/-/string-width-4.2.3.tgz#269c7117d27b05ad2e536830a8ec895ef9c6d010" + integrity sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g== + dependencies: + emoji-regex "^8.0.0" + is-fullwidth-code-point "^3.0.0" + strip-ansi "^6.0.1" + string-width@^1.0.1: version "1.0.2" resolved "https://registry.yarnpkg.com/string-width/-/string-width-1.0.2.tgz#118bdf5b8cdc51a2a7e70d211e07e2b0b9b107d3" @@ -8311,6 +9475,15 @@ string-width@^4.0.0, string-width@^4.1.0, string-width@^4.2.0, string-width@^4.2 is-fullwidth-code-point "^3.0.0" strip-ansi "^6.0.1" +string-width@^5.0.1, string-width@^5.1.2: + version "5.1.2" + resolved "https://registry.yarnpkg.com/string-width/-/string-width-5.1.2.tgz#14f8daec6d81e7221d2a357e668cab73bdbca794" + integrity sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA== + dependencies: + eastasianwidth "^0.2.0" + emoji-regex "^9.2.2" + strip-ansi "^7.0.1" + string_decoder@^1.1.1: version "1.3.0" resolved "https://registry.yarnpkg.com/string_decoder/-/string_decoder-1.3.0.tgz#42f114594a46cf1a8e30b0a84f56c78c3edac21e" @@ -8330,6 +9503,13 @@ string_decoder@~1.1.1: dependencies: safe-buffer "~5.1.0" +"strip-ansi-cjs@npm:strip-ansi@^6.0.1": + version "6.0.1" + resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-6.0.1.tgz#9e26c63d30f53443e9489495b2105d37b67a85d9" + integrity sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A== + dependencies: + ansi-regex "^5.0.1" + strip-ansi@^3.0.0, strip-ansi@^3.0.1: version "3.0.1" resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-3.0.1.tgz#6a385fb8853d952d5ff05d0e8aaf94278dc63dcf" @@ -8358,6 +9538,13 @@ strip-ansi@^6.0.0, strip-ansi@^6.0.1: dependencies: ansi-regex "^5.0.1" +strip-ansi@^7.0.1: + version "7.1.0" + resolved 
"https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-7.1.0.tgz#d5b6568ca689d8561370b0707685d22434faff45" + integrity sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ== + dependencies: + ansi-regex "^6.0.1" + strip-bom@^2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/strip-bom/-/strip-bom-2.0.0.tgz#6219a85616520491f35788bdbf1447a99c7e6b0e" @@ -8397,7 +9584,7 @@ sublevel-pouchdb@7.3.1: ltgt "2.2.1" readable-stream "1.1.14" -supports-color@8.1.1, supports-color@^8.1.1: +supports-color@8.1.1, supports-color@^8, supports-color@^8.1.1: version "8.1.1" resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-8.1.1.tgz#cd6fc17e28500cff56c1b86c0a7fd4a54a73005c" integrity sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q== @@ -8423,6 +9610,11 @@ supports-color@^7.0.0, supports-color@^7.1.0: dependencies: has-flag "^4.0.0" +supports-color@^9.4.0: + version "9.4.0" + resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-9.4.0.tgz#17bfcf686288f531db3dea3215510621ccb55954" + integrity sha512-VL+lNrEoIXww1coLPOmiEmK/0sGigko5COxI09KzHc2VJXJsQ37UaQ+8quuxjDeA7+KnLGTWRyOXSLLR2Wb4jw== + supports-hyperlinks@^2.2.0: version "2.3.0" resolved "https://registry.yarnpkg.com/supports-hyperlinks/-/supports-hyperlinks-2.3.0.tgz#3943544347c1ff90b15effb03fc14ae45ec10624" @@ -8530,6 +9722,18 @@ tar@^6.1.0: mkdirp "^1.0.3" yallist "^4.0.0" +tar@^6.1.11: + version "6.2.1" + resolved "https://registry.yarnpkg.com/tar/-/tar-6.2.1.tgz#717549c541bc3c2af15751bea94b1dd068d4b03a" + integrity sha512-DZ4yORTwrbTj/7MZYq2w+/ZFdI6OZ/f9SFHR+71gIVUZhOQPHzVCLpvRnPgyaMpfWxxk/4ONva3GQSyNIKRv6A== + dependencies: + chownr "^2.0.0" + fs-minipass "^2.0.0" + minipass "^5.0.0" + minizlib "^2.1.1" + mkdirp "^1.0.3" + yallist "^4.0.0" + testrpc@0.0.1: version "0.0.1" resolved "https://registry.yarnpkg.com/testrpc/-/testrpc-0.0.1.tgz#83e2195b1f5873aec7be1af8cbe6dcf39edb7aed" @@ -8599,7 
+9803,7 @@ tmp-promise@3.0.3: dependencies: tmp "^0.2.0" -tmp@0.0.33: +tmp@0.0.33, tmp@^0.0.33: version "0.0.33" resolved "https://registry.yarnpkg.com/tmp/-/tmp-0.0.33.tgz#6d34335889768d21b2bcda0aa277ced3b1bfadf9" integrity sha512-jRCJlojKnZ3addtTOjdIqoRuPEKBvNXcGYqzO6zWZX8KfKEpnGY5jfggJQ3EjKuu8D4bJRr0y+cYJFmYbImXGw== @@ -8704,6 +9908,11 @@ tslib@^2.0.0, tslib@^2.1.0, tslib@^2.3.1, tslib@^2.4.0, tslib@^2.5.0, tslib@^2.6 resolved "https://registry.yarnpkg.com/tslib/-/tslib-2.6.2.tgz#703ac29425e7b37cd6fd456e92404d46d1f3e4ae" integrity sha512-AEYxH93jGFPn/a2iVAwW87VuUIkR1FVUKB77NwMF7nBTDkDrrT/Hpt/IrCJ0QXhW27jTBDcf5ZY7w6RiqTMw2Q== +tslib@^2.6.3: + version "2.8.1" + resolved "https://registry.yarnpkg.com/tslib/-/tslib-2.8.1.tgz#612efe4ed235d567e8aba5f2a5fab70280ade83f" + integrity sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w== + tslib@~2.4.0: version "2.4.1" resolved "https://registry.yarnpkg.com/tslib/-/tslib-2.4.1.tgz#0d0bfbaac2880b91e22df0768e55be9753a5b17e" @@ -8785,6 +9994,21 @@ typescript-tuple@^2.2.1: dependencies: typescript-compare "^0.0.2" +uint8-varint@^2.0.1, uint8-varint@^2.0.2: + version "2.0.4" + resolved "https://registry.yarnpkg.com/uint8-varint/-/uint8-varint-2.0.4.tgz#85be52b3849eb30f2c3640a2df8a14364180affb" + integrity sha512-FwpTa7ZGA/f/EssWAb5/YV6pHgVF1fViKdW8cWaEarjB8t7NyofSWBdOTyFPaGuUG4gx3v1O3PQ8etsiOs3lcw== + dependencies: + uint8arraylist "^2.0.0" + uint8arrays "^5.0.0" + +uint8arraylist@^2.0.0, uint8arraylist@^2.4.3, uint8arraylist@^2.4.8: + version "2.4.8" + resolved "https://registry.yarnpkg.com/uint8arraylist/-/uint8arraylist-2.4.8.tgz#5a4d17f4defd77799cb38e93fd5db0f0dceddc12" + integrity sha512-vc1PlGOzglLF0eae1M8mLRTBivsvrGsdmJ5RbK3e+QRvRLOZfZhQROTwH/OfyF3+ZVUg9/8hE8bmKP2CvP9quQ== + dependencies: + uint8arrays "^5.0.1" + uint8arrays@^3.0.0: version "3.1.1" resolved "https://registry.yarnpkg.com/uint8arrays/-/uint8arrays-3.1.1.tgz#2d8762acce159ccd9936057572dade9459f65ae0" @@ -8792,6 
+10016,13 @@ uint8arrays@^3.0.0: dependencies: multiformats "^9.4.2" +uint8arrays@^5.0.0, uint8arrays@^5.0.1, uint8arrays@^5.0.2, uint8arrays@^5.0.3, uint8arrays@^5.1.0: + version "5.1.0" + resolved "https://registry.yarnpkg.com/uint8arrays/-/uint8arrays-5.1.0.tgz#14047c9bdf825d025b7391299436e5e50e7270f1" + integrity sha512-vA6nFepEmlSKkMBnLBaUMVvAC4G3CTmO58C12y4sq6WPDOR7mOFYOi7GlrQ4djeSbP6JG9Pv9tJDM97PedRSww== + dependencies: + multiformats "^13.0.0" + ultron@~1.1.0: version "1.1.1" resolved "https://registry.yarnpkg.com/ultron/-/ultron-1.1.1.tgz#9fe1536a10a664a65266a1e3ccf85fd36302bc9c" @@ -8802,6 +10033,11 @@ undici-types@~5.26.4: resolved "https://registry.yarnpkg.com/undici-types/-/undici-types-5.26.5.tgz#bcd539893d00b56e964fd2657a4866b221a65617" integrity sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA== +undici@7.1.1: + version "7.1.1" + resolved "https://registry.yarnpkg.com/undici/-/undici-7.1.1.tgz#f11eceeaaaa34ff8a28da31b68b0b4a8d75562f0" + integrity sha512-WZkQ6eH9f5ZT93gaIffsbUaDpBwjbpvmMbfaEhOnbdUneurTESeRxwPGwjI28mRFESH3W3e8Togijh37ptOQqA== + universalify@^0.1.0: version "0.1.2" resolved "https://registry.yarnpkg.com/universalify/-/universalify-0.1.2.tgz#b646f69be3942dabcecc9d6639c80dc105efaa66" @@ -8862,6 +10098,11 @@ url-set-query@^1.0.0: resolved "https://registry.yarnpkg.com/url-set-query/-/url-set-query-1.0.0.tgz#016e8cfd7c20ee05cafe7795e892bd0702faa339" integrity sha512-3AChu4NiXquPfeckE5R5cGdiHCMWJx1dwCWOmWIL4KHAziJNOFIYJlpGFeKDvwLPHovZRCxK3cYlwzqI9Vp+Gg== +urlpattern-polyfill@^10.0.0: + version "10.0.0" + resolved "https://registry.yarnpkg.com/urlpattern-polyfill/-/urlpattern-polyfill-10.0.0.tgz#f0a03a97bfb03cdf33553e5e79a2aadd22cac8ec" + integrity sha512-H/A06tKD7sS1O1X2SshBVeA5FLycRpjqiBeqGKmBwBDBy28EnRjORxTNe269KSSr5un5qyWi1iL61wLxpd+ZOg== + urlpattern-polyfill@^8.0.0: version "8.0.2" resolved 
"https://registry.yarnpkg.com/urlpattern-polyfill/-/urlpattern-polyfill-8.0.2.tgz#99f096e35eff8bf4b5a2aa7d58a1523d6ebc7ce5" @@ -8993,6 +10234,14 @@ wcwidth@^1.0.1: dependencies: defaults "^1.0.3" +weald@^1.0.4: + version "1.0.4" + resolved "https://registry.yarnpkg.com/weald/-/weald-1.0.4.tgz#8858cf9186869deba58357ae10cf26eaada80bb0" + integrity sha512-+kYTuHonJBwmFhP1Z4YQK/dGi3jAnJGCYhyODFpHK73rbxnp9lnZQj7a2m+WVgn8fXr5bJaxUpF6l8qZpPeNWQ== + dependencies: + ms "^3.0.0-canary.1" + supports-color "^9.4.0" + web-streams-polyfill@^3.2.1: version "3.3.3" resolved "https://registry.yarnpkg.com/web-streams-polyfill/-/web-streams-polyfill-3.3.3.tgz#2073b91a2fdb1fbfbd401e7de0ac9f8214cecb4b" @@ -9065,6 +10314,13 @@ web3-core@1.10.0: web3-core-requestmanager "1.10.0" web3-utils "1.10.0" +web3-errors@^1.2.0, web3-errors@^1.3.1: + version "1.3.1" + resolved "https://registry.yarnpkg.com/web3-errors/-/web3-errors-1.3.1.tgz#163bc4d869f98614760b683d733c3ed1fb415d98" + integrity sha512-w3NMJujH+ZSW4ltIZZKtdbkbyQEvBzyp3JRn59Ckli0Nz4VMsVq8aF1bLWM7A2kuQ+yVEm3ySeNU+7mSRwx7RQ== + dependencies: + web3-types "^1.10.0" + web3-eth-abi@1.10.0: version "1.10.0" resolved "https://registry.yarnpkg.com/web3-eth-abi/-/web3-eth-abi-1.10.0.tgz#53a7a2c95a571e205e27fd9e664df4919483cce1" @@ -9081,6 +10337,17 @@ web3-eth-abi@1.7.0: "@ethersproject/abi" "5.0.7" web3-utils "1.7.0" +web3-eth-abi@4.4.1: + version "4.4.1" + resolved "https://registry.yarnpkg.com/web3-eth-abi/-/web3-eth-abi-4.4.1.tgz#1dca9d80341b3cd7a1ae07dc98080c2073d62a29" + integrity sha512-60ecEkF6kQ9zAfbTY04Nc9q4eEYM0++BySpGi8wZ2PD1tw/c0SDvsKhV6IKURxLJhsDlb08dATc3iD6IbtWJmg== + dependencies: + abitype "0.7.1" + web3-errors "^1.3.1" + web3-types "^1.10.0" + web3-utils "^4.3.3" + web3-validator "^2.0.6" + web3-eth-accounts@1.10.0: version "1.10.0" resolved "https://registry.yarnpkg.com/web3-eth-accounts/-/web3-eth-accounts-1.10.0.tgz#2942beca0a4291455f32cf09de10457a19a48117" @@ -9209,6 +10476,11 @@ web3-shh@1.10.0: web3-core-subscriptions 
"1.10.0" web3-net "1.10.0" +web3-types@^1.10.0, web3-types@^1.6.0: + version "1.10.0" + resolved "https://registry.yarnpkg.com/web3-types/-/web3-types-1.10.0.tgz#41b0b4d2dd75e919d5b6f37bf139e29f445db04e" + integrity sha512-0IXoaAFtFc8Yin7cCdQfB9ZmjafrbP6BO0f0KT/khMhXKUpoJ6yShrVhiNpyRBo8QQjuOagsWzwSK2H49I7sbw== + web3-utils@1.10.0: version "1.10.0" resolved "https://registry.yarnpkg.com/web3-utils/-/web3-utils-1.10.0.tgz#ca4c1b431a765c14ac7f773e92e0fd9377ccf578" @@ -9249,6 +10521,28 @@ web3-utils@^1.0.0-beta.31: randombytes "^2.1.0" utf8 "3.0.0" +web3-utils@^4.3.3: + version "4.3.3" + resolved "https://registry.yarnpkg.com/web3-utils/-/web3-utils-4.3.3.tgz#e380a1c03a050d3704f94bd08c1c9f50a1487205" + integrity sha512-kZUeCwaQm+RNc2Bf1V3BYbF29lQQKz28L0y+FA4G0lS8IxtJVGi5SeDTUkpwqqkdHHC7JcapPDnyyzJ1lfWlOw== + dependencies: + ethereum-cryptography "^2.0.0" + eventemitter3 "^5.0.1" + web3-errors "^1.3.1" + web3-types "^1.10.0" + web3-validator "^2.0.6" + +web3-validator@^2.0.6: + version "2.0.6" + resolved "https://registry.yarnpkg.com/web3-validator/-/web3-validator-2.0.6.tgz#a0cdaa39e1d1708ece5fae155b034e29d6a19248" + integrity sha512-qn9id0/l1bWmvH4XfnG/JtGKKwut2Vokl6YXP5Kfg424npysmtRLe9DgiNBM9Op7QL/aSiaA0TVXibuIuWcizg== + dependencies: + ethereum-cryptography "^2.0.0" + util "^0.12.5" + web3-errors "^1.2.0" + web3-types "^1.6.0" + zod "^3.21.4" + web3@1.10.0: version "1.10.0" resolved "https://registry.yarnpkg.com/web3/-/web3-1.10.0.tgz#2fde0009f59aa756c93e07ea2a7f3ab971091274" @@ -9313,6 +10607,13 @@ whatwg-url@^5.0.0: tr46 "~0.0.3" webidl-conversions "^3.0.0" +wherearewe@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/wherearewe/-/wherearewe-2.0.1.tgz#37c97a7bf112dca8db34bfefb2f6c997af312bb8" + integrity sha512-XUguZbDxCA2wBn2LoFtcEhXL6AXo+hVjGonwhSTTTU9SzbWG8Xu3onNIpzf9j/mYUcJQ0f+m37SzG77G851uFw== + dependencies: + is-electron "^2.2.0" + which-module@^1.0.0: version "1.0.0" resolved 
"https://registry.yarnpkg.com/which-module/-/which-module-1.0.0.tgz#bba63ca861948994ff307736089e3b96026c2a4f" @@ -9358,6 +10659,15 @@ workerpool@6.2.1: resolved "https://registry.yarnpkg.com/workerpool/-/workerpool-6.2.1.tgz#46fc150c17d826b86a008e5a4508656777e9c343" integrity sha512-ILEIE97kDZvF9Wb9f6h5aXK4swSlKGUcOEGiIYb2OOu/IrDU9iwj0fD//SsA6E5ibwJxpEvhullJY4Sl4GcpAw== +"wrap-ansi-cjs@npm:wrap-ansi@^7.0.0": + version "7.0.0" + resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-7.0.0.tgz#67e145cff510a6a6984bdf1152911d69d2eb9e43" + integrity sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q== + dependencies: + ansi-styles "^4.0.0" + string-width "^4.1.0" + strip-ansi "^6.0.0" + wrap-ansi@^2.0.0: version "2.1.0" resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-2.1.0.tgz#d8fc3d284dd05794fe84973caecdd1cf824fdd85" @@ -9366,6 +10676,15 @@ wrap-ansi@^2.0.0: string-width "^1.0.1" strip-ansi "^3.0.1" +wrap-ansi@^6.2.0: + version "6.2.0" + resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-6.2.0.tgz#e9393ba07102e6c91a3b221478f0257cd2856e53" + integrity sha512-r6lPcBGxZXlIcymEu7InxDMhdW0KDxpLgoFLcguasxCaJ/SOIZwINatK9KY/tf+ZrlywOKU0UDj3ATXUBfxJXA== + dependencies: + ansi-styles "^4.0.0" + string-width "^4.1.0" + strip-ansi "^6.0.0" + wrap-ansi@^7.0.0: version "7.0.0" resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-7.0.0.tgz#67e145cff510a6a6984bdf1152911d69d2eb9e43" @@ -9375,6 +10694,15 @@ wrap-ansi@^7.0.0: string-width "^4.1.0" strip-ansi "^6.0.0" +wrap-ansi@^8.1.0: + version "8.1.0" + resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-8.1.0.tgz#56dc22368ee570face1b49819975d9b9a5ead214" + integrity sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ== + dependencies: + ansi-styles "^6.1.0" + string-width "^5.0.1" + strip-ansi "^7.0.1" + wrappy@1: version "1.0.2" resolved 
"https://registry.yarnpkg.com/wrappy/-/wrappy-1.0.2.tgz#b5243d8f3ec1aa35f1364605bc0d1036e30ab69f" @@ -9418,6 +10746,11 @@ ws@^7.2.0, ws@^7.4.5: resolved "https://registry.yarnpkg.com/ws/-/ws-7.5.9.tgz#54fa7db29f4c7cec68b1ddd3a89de099942bb591" integrity sha512-F+P9Jil7UiSKSkppIiD94dN07AwvFixvLIj1Og1Rl9GGMuNipJnV9JzjD6XuqmAeiswGvUmNLjr5cFuXwNS77Q== +ws@^7.5.10: + version "7.5.10" + resolved "https://registry.yarnpkg.com/ws/-/ws-7.5.10.tgz#58b5c20dc281633f6c19113f39b349bd8bd558d9" + integrity sha512-+dbF1tHwZpXcbOJdVOkzLDxZP1ailvSxM6ZweXTegylPny803bFhA+vqBYw4s31NSAk4S2Qz+AKXK9a4wkdjcQ== + xhr-request-promise@^0.1.2: version "0.1.3" resolved "https://registry.yarnpkg.com/xhr-request-promise/-/xhr-request-promise-0.1.3.tgz#2d5f4b16d8c6c893be97f1a62b0ed4cf3ca5f96c" @@ -9503,6 +10836,16 @@ yaml@1.10.2, yaml@^1.10.0, yaml@^1.10.2, yaml@^1.7.2: resolved "https://registry.yarnpkg.com/yaml/-/yaml-1.10.2.tgz#2301c5ffbf12b467de8da2333a459e29e7920e4b" integrity sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg== +yaml@2.6.1: + version "2.6.1" + resolved "https://registry.yarnpkg.com/yaml/-/yaml-2.6.1.tgz#42f2b1ba89203f374609572d5349fb8686500773" + integrity sha512-7r0XPzioN/Q9kXBro/XPnA6kznR73DHq+GXh5ON7ZozRO6aMjbmiBuKste2wslTFkC5d1dw0GooOCepZXJ2SAg== + +yaml@^2.2.2: + version "2.7.0" + resolved "https://registry.yarnpkg.com/yaml/-/yaml-2.7.0.tgz#aef9bb617a64c937a9a748803786ad8d3ffe1e98" + integrity sha512-+hSoy/QHluxmC9kCIJyL/uyFmLmc+e5CFR5Wa+bpIhIj85LVb9ZH2nVnqrHoSvKogwODv0ClqZkmiSSaIH5LTA== + yargs-parser@20.2.4: version "20.2.4" resolved "https://registry.yarnpkg.com/yargs-parser/-/yargs-parser-20.2.4.tgz#b42890f14566796f85ae8e3a25290d205f154a54" @@ -9586,3 +10929,13 @@ yocto-queue@^0.1.0: version "0.1.0" resolved "https://registry.yarnpkg.com/yocto-queue/-/yocto-queue-0.1.0.tgz#0294eb3dee05028d31ee1a5fa2c556a6aaf10a1b" integrity sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q== + 
+yoctocolors-cjs@^2.1.2: + version "2.1.2" + resolved "https://registry.yarnpkg.com/yoctocolors-cjs/-/yoctocolors-cjs-2.1.2.tgz#f4b905a840a37506813a7acaa28febe97767a242" + integrity sha512-cYVsTjKl8b+FrnidjibDWskAv7UKOfcwaVZdp/it9n1s9fU3IkgDbhdIRKCW4JDsAlECJY0ytoVPT3sK6kideA== + +zod@^3.21.4: + version "3.24.1" + resolved "https://registry.yarnpkg.com/zod/-/zod-3.24.1.tgz#27445c912738c8ad1e9de1bea0359fa44d9d35ee" + integrity sha512-muH7gBL9sI1nciMZV67X5fTKKBLtwpZ5VBp1vsOQzj1MhrBZ4wlVCm3gedKZWLp0Oyel8sIGfeiz54Su+OVT+A== diff --git a/tests/runner-tests/yarn.lock b/tests/runner-tests/yarn.lock index 50e0c2b471f..9f3bdae834d 100644 --- a/tests/runner-tests/yarn.lock +++ b/tests/runner-tests/yarn.lock @@ -349,6 +349,40 @@ which "2.0.2" yaml "1.10.2" +"@graphprotocol/graph-cli@0.79.0-alpha-20240711124603-49edf22": + version "0.79.0-alpha-20240711124603-49edf22" + resolved "https://registry.yarnpkg.com/@graphprotocol/graph-cli/-/graph-cli-0.79.0-alpha-20240711124603-49edf22.tgz#4e3f6201932a0b68ce64d6badd8432cf2bead3c2" + integrity sha512-fZrdPiFbbbBVMnvsjfKA+j48WzzquaHQIpozBqnUKRPCV1n1NenIaq2nH16mlMwovRIS7AAIVCpa0QYQuPzw7Q== + dependencies: + "@float-capital/float-subgraph-uncrashable" "^0.0.0-alpha.4" + "@oclif/core" "2.8.6" + "@oclif/plugin-autocomplete" "^2.3.6" + "@oclif/plugin-not-found" "^2.4.0" + "@whatwg-node/fetch" "^0.8.4" + assemblyscript "0.19.23" + binary-install-raw "0.0.13" + chalk "3.0.0" + chokidar "3.5.3" + debug "4.3.4" + docker-compose "0.23.19" + dockerode "2.5.8" + fs-extra "9.1.0" + glob "9.3.5" + gluegun "5.1.6" + graphql "15.5.0" + immutable "4.2.1" + ipfs-http-client "55.0.0" + jayson "4.0.0" + js-yaml "3.14.1" + open "8.4.2" + prettier "3.0.3" + semver "7.4.0" + sync-request "6.1.0" + tmp-promise "3.0.3" + web3-eth-abi "1.7.0" + which "2.0.2" + yaml "1.10.2" + "@graphprotocol/graph-ts@0.30.0": version "0.30.0" resolved "https://registry.npmjs.org/@graphprotocol/graph-ts/-/graph-ts-0.30.0.tgz" @@ -1473,6 +1507,11 @@ defaults@^1.0.3: dependencies: 
clone "^1.0.2" +define-lazy-prop@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/define-lazy-prop/-/define-lazy-prop-2.0.0.tgz#3f7ae421129bcaaac9bc74905c98a0009ec9ee7f" + integrity sha512-Ds09qNh8yw3khSjiJjiUInaGX9xlqZDY7JVryGxdxV7NPeuqQfplOpQ66yJFZut3jLa5zOwkXw1g9EI2uKh4Og== + delay@^5.0.0: version "5.0.0" resolved "https://registry.npmjs.org/delay/-/delay-5.0.0.tgz" @@ -1545,6 +1584,13 @@ ejs@3.1.6: dependencies: jake "^10.6.1" +ejs@3.1.8: + version "3.1.8" + resolved "https://registry.yarnpkg.com/ejs/-/ejs-3.1.8.tgz#758d32910c78047585c7ef1f92f9ee041c1c190b" + integrity sha512-/sXZeMlhS0ArkfX2Aw780gJzXSMPnKjtspYZv+f3NiKLlubezAHDU5+9xz6gd3/NhG3txQCo6xlglmTS+oTGEQ== + dependencies: + jake "^10.8.5" + ejs@^3.1.8: version "3.1.9" resolved "https://registry.npmjs.org/ejs/-/ejs-3.1.9.tgz" @@ -1996,6 +2042,42 @@ gluegun@5.1.2: which "2.0.2" yargs-parser "^21.0.0" +gluegun@5.1.6: + version "5.1.6" + resolved "https://registry.yarnpkg.com/gluegun/-/gluegun-5.1.6.tgz#74ec13193913dc610f5c1a4039972c70c96a7bad" + integrity sha512-9zbi4EQWIVvSOftJWquWzr9gLX2kaDgPkNR5dYWbM53eVvCI3iKuxLlnKoHC0v4uPoq+Kr/+F569tjoFbA4DSA== + dependencies: + apisauce "^2.1.5" + app-module-path "^2.2.0" + cli-table3 "0.6.0" + colors "1.4.0" + cosmiconfig "7.0.1" + cross-spawn "7.0.3" + ejs "3.1.8" + enquirer "2.3.6" + execa "5.1.1" + fs-jetpack "4.3.1" + lodash.camelcase "^4.3.0" + lodash.kebabcase "^4.1.1" + lodash.lowercase "^4.3.0" + lodash.lowerfirst "^4.3.1" + lodash.pad "^4.5.1" + lodash.padend "^4.6.1" + lodash.padstart "^4.6.1" + lodash.repeat "^4.1.0" + lodash.snakecase "^4.1.1" + lodash.startcase "^4.4.0" + lodash.trim "^4.5.1" + lodash.trimend "^4.5.1" + lodash.trimstart "^4.5.1" + lodash.uppercase "^4.3.0" + lodash.upperfirst "^4.3.1" + ora "4.0.2" + pluralize "^8.0.0" + semver "7.3.5" + which "2.0.2" + yargs-parser "^21.0.0" + graceful-fs@^4.1.6, graceful-fs@^4.2.0: version "4.2.11" resolved 
"https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-4.2.11.tgz#4183e4e8bf08bb6e05bbb2f7d2e0c8f712ca40e3" @@ -2282,7 +2364,7 @@ is-binary-path@~2.1.0: dependencies: binary-extensions "^2.0.0" -is-docker@^2.0.0: +is-docker@^2.0.0, is-docker@^2.1.1: version "2.2.1" resolved "https://registry.npmjs.org/is-docker/-/is-docker-2.2.1.tgz" integrity sha512-F+i2BKsFrH66iaUFc0woD8sLy8getkwTwtOBjvs56Cx4CgJDeKQeqfz8wAYiSb8JOprWhHH5p77PbmYCvvUuXQ== @@ -2922,6 +3004,15 @@ onetime@^5.1.0, onetime@^5.1.2: dependencies: mimic-fn "^2.1.0" +open@8.4.2: + version "8.4.2" + resolved "https://registry.yarnpkg.com/open/-/open-8.4.2.tgz#5b5ffe2a8f793dcd2aad73e550cb87b59cb084f9" + integrity sha512-7x81NCL719oNbsq/3mh+hVrAWmFuEYUqrq/Iw3kUzH8ReypT9QQ0BLoJS7/G9k6N81XjW4qHWtjWwe/9eLy1EQ== + dependencies: + define-lazy-prop "^2.0.0" + is-docker "^2.1.1" + is-wsl "^2.2.0" + ora@4.0.2: version "4.0.2" resolved "https://registry.npmjs.org/ora/-/ora-4.0.2.tgz" @@ -3042,6 +3133,11 @@ prettier@1.19.1: resolved "https://registry.npmjs.org/prettier/-/prettier-1.19.1.tgz" integrity sha512-s7PoyDv/II1ObgQunCbB9PdLmUcBZcnWOcxDh7O0N/UwDEsHyqkW+Qh28jW+mVuCdx7gLB0BotYI1Y6uI9iyew== +prettier@3.0.3: + version "3.0.3" + resolved "https://registry.yarnpkg.com/prettier/-/prettier-3.0.3.tgz#432a51f7ba422d1469096c0fdc28e235db8f9643" + integrity sha512-L/4pUDMxcNa8R/EthV08Zt42WBO4h1rarVtK0K+QJG0X187OLo7l699jWw0GKuwzkPQ//jMFA/8Xm6Fh3J/DAg== + process-nextick-args@~2.0.0: version "2.0.1" resolved "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz" diff --git a/tests/src/fixture/ethereum.rs b/tests/src/fixture/ethereum.rs index b20672ce563..fc651a512db 100644 --- a/tests/src/fixture/ethereum.rs +++ b/tests/src/fixture/ethereum.rs @@ -6,12 +6,15 @@ use super::{ test_ptr, CommonChainConfig, MutexBlockStreamBuilder, NoopAdapterSelector, NoopRuntimeAdapterBuilder, StaticBlockRefetcher, StaticStreamBuilder, Stores, TestChain, }; +use graph::blockchain::block_stream::{EntityOperationKind, 
EntitySourceOperation}; use graph::blockchain::client::ChainClient; -use graph::blockchain::{BlockPtr, TriggersAdapterSelector}; +use graph::blockchain::{BlockPtr, Trigger, TriggersAdapterSelector}; use graph::cheap_clone::CheapClone; +use graph::data_source::subgraph; use graph::prelude::ethabi::ethereum_types::H256; use graph::prelude::web3::types::{Address, Log, Transaction, H160}; -use graph::prelude::{ethabi, tiny_keccak, LightEthereumBlock, ENV_VARS}; +use graph::prelude::{ethabi, tiny_keccak, DeploymentHash, Entity, LightEthereumBlock, ENV_VARS}; +use graph::schema::EntityType; use graph::{blockchain::block_stream::BlockWithTriggers, prelude::ethabi::ethereum_types::U64}; use graph_chain_ethereum::network::EthereumNetworkAdapters; use graph_chain_ethereum::trigger::LogRef; @@ -81,7 +84,10 @@ pub fn genesis() -> BlockWithTriggers { number: Some(U64::from(ptr.number)), ..Default::default() })), - trigger_data: vec![EthereumTrigger::Block(ptr, EthereumBlockTriggerType::End)], + trigger_data: vec![Trigger::Chain(EthereumTrigger::Block( + ptr, + EthereumBlockTriggerType::End, + ))], } } @@ -128,7 +134,10 @@ pub fn empty_block(parent_ptr: BlockPtr, ptr: BlockPtr) -> BlockWithTriggers, payload: impl Into, + source: DeploymentHash, + entity: Entity, + entity_type: EntityType, + entity_op: EntityOperationKind, + vid: i64, + source_idx: u32, +) { + let entity = EntitySourceOperation { + entity: entity, + entity_type: entity_type, + entity_op: entity_op, + vid, + }; + + block + .trigger_data + .push(Trigger::Subgraph(subgraph::TriggerData { + source, + entity, + source_idx, + })); } pub fn push_test_command( @@ -175,12 +211,16 @@ pub fn push_test_command( }); block .trigger_data - .push(EthereumTrigger::Log(LogRef::FullLog(log, None))) + .push(Trigger::Chain(EthereumTrigger::Log(LogRef::FullLog( + log, None, + )))) } pub fn push_test_polling_trigger(block: &mut BlockWithTriggers) { - block.trigger_data.push(EthereumTrigger::Block( - block.ptr(), - 
EthereumBlockTriggerType::End, - )) + block + .trigger_data + .push(Trigger::Chain(EthereumTrigger::Block( + block.ptr(), + EthereumBlockTriggerType::End, + ))) } diff --git a/tests/src/fixture/mod.rs b/tests/src/fixture/mod.rs index 4eb5fbb42b1..4e8127875a0 100644 --- a/tests/src/fixture/mod.rs +++ b/tests/src/fixture/mod.rs @@ -1,7 +1,7 @@ pub mod ethereum; pub mod substreams; -use std::collections::HashMap; +use std::collections::{BTreeSet, HashMap}; use std::marker::PhantomData; use std::sync::Mutex; use std::time::{Duration, Instant}; @@ -14,17 +14,18 @@ use graph::blockchain::block_stream::{ }; use graph::blockchain::{ Block, BlockHash, BlockPtr, Blockchain, BlockchainMap, ChainIdentifier, RuntimeAdapter, - TriggersAdapter, TriggersAdapterSelector, + TriggerFilterWrapper, TriggersAdapter, TriggersAdapterSelector, }; use graph::cheap_clone::CheapClone; use graph::components::link_resolver::{ArweaveClient, ArweaveResolver, FileSizeLimit}; use graph::components::metrics::MetricsRegistry; use graph::components::network_provider::ChainName; -use graph::components::store::{BlockStore, DeploymentLocator, EthereumCallCache}; +use graph::components::store::{BlockStore, DeploymentLocator, EthereumCallCache, SourceableStore}; use graph::components::subgraph::Settings; use graph::data::graphql::load_manager::LoadManager; use graph::data::query::{Query, QueryTarget}; use graph::data::subgraph::schema::{SubgraphError, SubgraphHealth}; +use graph::data_source::DataSource; use graph::endpoint::EndpointMetrics; use graph::env::EnvVars; use graph::firehose::{FirehoseEndpoint, FirehoseEndpoints, SubgraphLimit}; @@ -209,14 +210,20 @@ impl TestContext { let (logger, deployment, raw) = self.get_runner_context().await; let tp: Box> = Box::new(SubgraphTriggerProcessor {}); + let deployment_status_metric = self + .instance_manager + .new_deployment_status_metric(&deployment); + self.instance_manager - .build_subgraph_runner( + .build_subgraph_runner_inner( logger, 
self.env_vars.cheap_clone(), deployment, raw, Some(stop_block.block_number()), tp, + deployment_status_metric, + true, ) .await .unwrap() @@ -234,14 +241,20 @@ impl TestContext { graph_chain_substreams::TriggerProcessor::new(deployment.clone()), ); + let deployment_status_metric = self + .instance_manager + .new_deployment_status_metric(&deployment); + self.instance_manager - .build_subgraph_runner( + .build_subgraph_runner_inner( logger, self.env_vars.cheap_clone(), deployment, raw, Some(stop_block.block_number()), tp, + deployment_status_metric, + true, ) .await .unwrap() @@ -725,14 +738,27 @@ impl BlockStreamBuilder for MutexBlockStreamBuilder { async fn build_polling( &self, - _chain: &C, - _deployment: DeploymentLocator, - _start_blocks: Vec, - _subgraph_current_block: Option, - _filter: Arc<::TriggerFilter>, - _unified_api_version: graph::data::subgraph::UnifiedMappingApiVersion, + chain: &C, + deployment: DeploymentLocator, + start_blocks: Vec, + source_subgraph_stores: Vec>, + subgraph_current_block: Option, + filter: Arc>, + unified_api_version: graph::data::subgraph::UnifiedMappingApiVersion, ) -> anyhow::Result>> { - unimplemented!("only firehose mode should be used for tests") + let builder = self.0.lock().unwrap().clone(); + + builder + .build_polling( + chain, + deployment, + start_blocks, + source_subgraph_stores, + subgraph_current_block, + filter, + unified_api_version, + ) + .await } } @@ -790,11 +816,22 @@ where _chain: &C, _deployment: DeploymentLocator, _start_blocks: Vec, - _subgraph_current_block: Option, - _filter: Arc, + _source_subgraph_stores: Vec>, + subgraph_current_block: Option, + _filter: Arc>, _unified_api_version: graph::data::subgraph::UnifiedMappingApiVersion, ) -> anyhow::Result>> { - unimplemented!("only firehose mode should be used for tests") + let current_idx = subgraph_current_block.map(|current_block| { + self.chain + .iter() + .enumerate() + .find(|(_, b)| b.ptr() == current_block) + .unwrap() + .0 + }); + 
Ok(Box::new(StaticStream { + stream: Box::pin(stream_events(self.chain.clone(), current_idx)), + })) } } @@ -873,10 +910,7 @@ struct NoopRuntimeAdapter { } impl RuntimeAdapter for NoopRuntimeAdapter { - fn host_fns( - &self, - _ds: &::DataSource, - ) -> Result, Error> { + fn host_fns(&self, _ds: &DataSource) -> Result, Error> { Ok(vec![]) } } @@ -959,11 +993,23 @@ impl TriggersAdapter for MockTriggersAdapter { todo!() } + async fn load_block_ptrs_by_numbers( + &self, + _logger: Logger, + _block_numbers: BTreeSet, + ) -> Result, Error> { + unimplemented!() + } + + async fn chain_head_ptr(&self) -> Result, Error> { + todo!() + } + async fn scan_triggers( &self, _from: BlockNumber, _to: BlockNumber, - _filter: &::TriggerFilter, + _filter: &C::TriggerFilter, ) -> Result<(Vec>, BlockNumber), Error> { todo!() } diff --git a/tests/src/subgraph.rs b/tests/src/subgraph.rs index 7696ce4b5a6..810b87cbb78 100644 --- a/tests/src/subgraph.rs +++ b/tests/src/subgraph.rs @@ -1,4 +1,5 @@ use std::{ + fs, io::{Read as _, Write as _}, time::{Duration, Instant}, }; @@ -7,6 +8,7 @@ use anyhow::anyhow; use graph::prelude::serde_json::{self, Value}; use serde::Deserialize; +use serde_yaml; use tokio::{process::Command, time::sleep}; use crate::{ @@ -52,12 +54,24 @@ impl Subgraph { Self::patch(&dir, contracts).await?; + // Check if subgraph has subgraph datasources + let yaml_content = fs::read_to_string(dir.path.join("subgraph.yaml.patched"))?; + let yaml: serde_yaml::Value = serde_yaml::from_str(&yaml_content)?; + let has_subgraph_datasource = yaml["dataSources"] + .as_sequence() + .and_then(|ds| ds.iter().find(|d| d["kind"].as_str() == Some("subgraph"))) + .is_some(); + // graph codegen subgraph.yaml let mut prog = Command::new(&CONFIG.graph_cli); - let cmd = prog - .arg("codegen") - .arg("subgraph.yaml.patched") - .current_dir(&dir.path); + let mut cmd = prog.arg("codegen").arg("subgraph.yaml.patched"); + + if has_subgraph_datasource { + cmd = cmd.arg(format!("--ipfs={}", 
CONFIG.graph_node.ipfs_uri)); + } + + cmd = cmd.current_dir(&dir.path); + run_checked(cmd).await?; // graph create --node diff --git a/tests/tests/integration_tests.rs b/tests/tests/integration_tests.rs index f2ff40f9ad2..d10df25698b 100644 --- a/tests/tests/integration_tests.rs +++ b/tests/tests/integration_tests.rs @@ -13,7 +13,7 @@ use std::future::Future; use std::pin::Pin; use std::time::{Duration, Instant}; -use anyhow::{anyhow, bail, Context}; +use anyhow::{anyhow, bail, Context, Result}; use graph::futures03::StreamExt; use graph::prelude::serde_json::{json, Value}; use graph::prelude::web3::types::U256; @@ -95,6 +95,7 @@ impl TestResult { struct TestCase { name: String, test: TestFn, + source_subgraph: Option, } impl TestCase { @@ -102,20 +103,84 @@ impl TestCase { where T: Future> + Send + 'static, { - fn force_boxed(f: fn(TestContext) -> T) -> TestFn - where - T: Future> + Send + 'static, - { - Box::new(move |ctx| Box::pin(f(ctx))) - } - Self { name: name.to_string(), - test: force_boxed(test), + test: Box::new(move |ctx| Box::pin(test(ctx))), + source_subgraph: None, } } + fn new_with_source_subgraph( + name: &str, + test: fn(TestContext) -> T, + source_subgraph: &str, + ) -> Self + where + T: Future> + Send + 'static, + { + let mut test_case = Self::new(name, test); + test_case.source_subgraph = Some(source_subgraph.to_string()); + test_case + } + + fn new_with_multiple_source_subgraphs( + name: &str, + test: fn(TestContext) -> T, + source_subgraphs: Vec<&str>, + ) -> Self + where + T: Future> + Send + 'static, + { + let mut test_case = Self::new(name, test); + test_case.source_subgraph = Some(source_subgraphs.join(",")); + test_case + } + + async fn deploy_and_wait( + &self, + subgraph_name: &str, + contracts: &[Contract], + ) -> Result { + status!(&self.name, "Deploying subgraph"); + let subgraph_name = match Subgraph::deploy(&subgraph_name, contracts).await { + Ok(name) => name, + Err(e) => { + error!(&self.name, "Deploy failed"); + return 
Err(anyhow!(e.context("Deploy failed"))); + } + }; + + status!(&self.name, "Waiting for subgraph to become ready"); + let subgraph = match Subgraph::wait_ready(&subgraph_name).await { + Ok(subgraph) => subgraph, + Err(e) => { + error!(&self.name, "Subgraph never synced or failed"); + return Err(anyhow!(e.context("Subgraph never synced or failed"))); + } + }; + + if subgraph.healthy { + status!(&self.name, "Subgraph ({}) is synced", subgraph.deployment); + } else { + status!(&self.name, "Subgraph ({}) has failed", subgraph.deployment); + } + + Ok(subgraph) + } + async fn run(self, contracts: &[Contract]) -> TestResult { + // If a subgraph has subgraph datasources, deploy them first + if let Some(_subgraphs) = &self.source_subgraph { + if let Err(e) = self.deploy_multiple_sources(contracts).await { + error!(&self.name, "source subgraph deployment failed"); + return TestResult { + name: self.name.clone(), + subgraph: None, + status: TestStatus::Err(e), + }; + } + } + status!(&self.name, "Deploying subgraph"); let subgraph_name = match Subgraph::deploy(&self.name, contracts).await { Ok(name) => name, @@ -174,6 +239,20 @@ impl TestCase { status, } } + + async fn deploy_multiple_sources(&self, contracts: &[Contract]) -> Result<()> { + if let Some(sources) = &self.source_subgraph { + for source in sources.split(",") { + let subgraph = self.deploy_and_wait(source, contracts).await?; + status!( + source, + "source subgraph deployed with hash {}", + subgraph.deployment + ); + } + } + Ok(()) + } } /// Run the given `query` against the `subgraph` and check that the result @@ -439,6 +518,91 @@ async fn test_eth_api(ctx: TestContext) -> anyhow::Result<()> { Ok(()) } +async fn subgraph_data_sources(ctx: TestContext) -> anyhow::Result<()> { + let subgraph = ctx.subgraph; + assert!(subgraph.healthy); + let expected_response = json!({ + "mirrorBlocks": [ + { "id": "1-v1", "number": "1" }, + { "id": "1-v2", "number": "1" }, + { "id": "1-v3", "number": "1" }, + { "id": "2-v1", 
"number": "2" }, + { "id": "2-v2", "number": "2" }, + { "id": "2-v3", "number": "2" }, + { "id": "3-v1", "number": "3" }, + { "id": "3-v2", "number": "3" }, + { "id": "3-v3", "number": "3" }, + { "id": "4-v1", "number": "4" }, + { "id": "4-v2", "number": "4" }, + { "id": "4-v3", "number": "4" }, + { "id": "5-v1", "number": "5" }, + { "id": "5-v2", "number": "5" }, + { "id": "5-v3", "number": "5" }, + { "id": "6-v1", "number": "6" }, + { "id": "6-v2", "number": "6" }, + { "id": "6-v3", "number": "6" }, + { "id": "7-v1", "number": "7" }, + { "id": "7-v2", "number": "7" }, + { "id": "7-v3", "number": "7" }, + { "id": "8-v1", "number": "8" }, + { "id": "8-v2", "number": "8" }, + { "id": "8-v3", "number": "8" }, + { "id": "9-v1", "number": "9" }, + { "id": "9-v2", "number": "9" }, + { "id": "9-v3", "number": "9" }, + { "id": "10-v1", "number": "10" }, + { "id": "10-v2", "number": "10" }, + { "id": "10-v3", "number": "10" }, + ] + }); + + query_succeeds( + "Blocks should be right", + &subgraph, + "{ mirrorBlocks(where: {number_lte: 10}, orderBy: number) { id, number } }", + expected_response, + ) + .await?; + + let expected_response = json!({ + "mirrorBlock": { "id": "TEST", "number": "1", "testMessage": "Created at block 1" }, + }); + + query_succeeds( + "Blocks should be right", + &subgraph, + "{ mirrorBlock(id: \"TEST\", block: {number: 1}) { id, number, testMessage } }", + expected_response, + ) + .await?; + + let expected_response = json!({ + "mirrorBlock": { "id": "TEST", "number": "1", "testMessage": "Updated at block 2" }, + }); + + query_succeeds( + "Blocks should be right", + &subgraph, + "{ mirrorBlock(id: \"TEST\", block: {number: 2}) { id, number, testMessage } }", + expected_response, + ) + .await?; + + let expected_response = json!({ + "mirrorBlock": null, + }); + + query_succeeds( + "Blocks should be right", + &subgraph, + "{ mirrorBlock(id: \"TEST\", block: {number: 3}) { id, number, testMessage } }", + expected_response, + ) + .await?; + + Ok(()) +} + 
async fn test_topic_filters(ctx: TestContext) -> anyhow::Result<()> { let subgraph = ctx.subgraph; assert!(subgraph.healthy); @@ -769,6 +933,33 @@ async fn test_missing(_sg: Subgraph) -> anyhow::Result<()> { Err(anyhow!("This test is missing")) } +async fn test_multiple_subgraph_datasources(ctx: TestContext) -> anyhow::Result<()> { + let subgraph = ctx.subgraph; + assert!(subgraph.healthy); + + // Test querying data aggregated from multiple sources + let exp = json!({ + "aggregatedDatas": [ + { + "id": "0", + "sourceA": "from source A", + "sourceB": "from source B", + "first": "sourceA" + }, + ] + }); + + query_succeeds( + "should aggregate data from multiple sources", + &subgraph, + "{ aggregatedDatas(first: 1) { id sourceA sourceB first } }", + exp, + ) + .await?; + + Ok(()) +} + /// The main test entrypoint. #[tokio::test] async fn integration_tests() -> anyhow::Result<()> { @@ -790,6 +981,16 @@ async fn integration_tests() -> anyhow::Result<()> { TestCase::new("timestamp", test_timestamp), TestCase::new("ethereum-api-tests", test_eth_api), TestCase::new("topic-filter", test_topic_filters), + TestCase::new_with_source_subgraph( + "subgraph-data-sources", + subgraph_data_sources, + "source-subgraph", + ), + TestCase::new_with_multiple_source_subgraphs( + "multiple-subgraph-datasources", + test_multiple_subgraph_datasources, + vec!["source-subgraph-a", "source-subgraph-b"], + ), ]; // Filter the test cases if a specific test name is provided diff --git a/tests/tests/runner_tests.rs b/tests/tests/runner_tests.rs index caeb67e9adf..ac645884b5d 100644 --- a/tests/tests/runner_tests.rs +++ b/tests/tests/runner_tests.rs @@ -501,10 +501,19 @@ async fn substreams_trigger_filter_construction() -> anyhow::Result<()> { let runner = ctx.runner_substreams(test_ptr(0)).await; let filter = runner.build_filter_for_test(); - assert_eq!(filter.module_name(), "graph_out"); - assert_eq!(filter.modules().as_ref().unwrap().modules.len(), 2); - assert_eq!(filter.start_block().unwrap(), 
 0); - assert_eq!(filter.data_sources_len(), 1); + assert_eq!(filter.chain_filter.module_name(), "graph_out"); + assert_eq!( + filter + .chain_filter + .modules() + .as_ref() + .unwrap() + .modules + .len(), + 2 + ); + assert_eq!(filter.chain_filter.start_block().unwrap(), 0); + assert_eq!(filter.chain_filter.data_sources_len(), 1); Ok(()) } @@ -526,7 +535,11 @@ async fn end_block() -> anyhow::Result<()> { let runner = ctx.runner(block_ptr.clone()).await; let runner = runner.run_for_test(false).await.unwrap(); let filter = runner.context().filter.as_ref().unwrap(); - let addresses = filter.log().contract_addresses().collect::<Vec<_>>(); + let addresses = filter + .chain_filter + .log() + .contract_addresses() + .collect::<Vec<_>>(); if should_contain_addr { assert!(addresses.contains(&addr));