diff --git a/.circleci/config.yml b/.circleci/config.yml
deleted file mode 100644
index 44d81737f08..00000000000
--- a/.circleci/config.yml
+++ /dev/null
@@ -1,7 +0,0 @@
-version: 2.1
-orbs:
-  hello: circleci/hello-build@0.0.5
-workflows:
-  "Hello Workflow":
-    jobs:
-      - hello/hello-build
diff --git a/.dockerignore b/.dockerignore
index 5874b8c3054..0bb3e477f64 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -1,2 +1,6 @@
-.git
 target
+docker/data
+node_modules
+.dockerignore
+.git
+.gitignore
diff --git a/.github/workflows/audit.yml b/.github/workflows/audit.yml
index f4fdaad071d..102a8b53e78 100644
--- a/.github/workflows/audit.yml
+++ b/.github/workflows/audit.yml
@@ -10,6 +10,7 @@ on:
 jobs:
   security_audit:
     runs-on: ubuntu-latest
+    timeout-minutes: 10
     steps:
       - uses: actions/checkout@v2
       - uses: actions-rs/audit-check@v1
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 984ca9964ec..11976751397 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -14,11 +14,8 @@ env:
 jobs:
   unit-tests:
     name: Run unit tests
-    strategy:
-      fail-fast: false
-      matrix:
-        rust: ["stable"]
     runs-on: ubuntu-latest
+    timeout-minutes: 60
     services:
       ipfs:
         image: ipfs/go-ipfs:v0.10.0
@@ -29,6 +26,7 @@ jobs:
         env:
           POSTGRES_PASSWORD: postgres
           POSTGRES_DB: graph_node_test
+          POSTGRES_INITDB_ARGS: "-E UTF8 --locale=C"
         options: >-
           --health-cmd pg_isready
           --health-interval 10s
@@ -36,43 +34,26 @@ jobs:
           --health-retries 5
         ports:
           - 5432:5432
+    env:
+      RUSTFLAGS: "-C link-arg=-fuse-ld=lld -D warnings"
     steps:
       - name: Checkout sources
         uses: actions/checkout@v2
-
-      - name: Cache cargo registry
-        uses: actions/cache@v2
-        with:
-          path: |
-            ~/.cargo/registry
-            ~/.cargo/git
-          key: test-cargo-${{ hashFiles('**/Cargo.toml') }}
-
-      - name: Install rust toolchain
-        uses: actions-rs/toolchain@v1
-        with:
-          profile: minimal
-          toolchain: ${{ matrix.rust }}
-          override: true
+      - uses: Swatinem/rust-cache@v2

       - name: Install lld
-        run: sudo apt-get install -y lld
+        run: sudo apt-get install -y lld protobuf-compiler

       - name: Run unit tests
         uses: actions-rs/cargo@v1
-        env:
-          RUSTFLAGS: "-C link-arg=-fuse-ld=lld -D warnings"
         with:
           command: test
           args: --verbose --workspace --exclude graph-tests -- --nocapture

   runner-tests:
     name: Subgraph Runner integration tests
-    strategy:
-      fail-fast: false
-      matrix:
-        rust: ["stable"]
     runs-on: ubuntu-latest
+    timeout-minutes: 60
     services:
       ipfs:
         image: ipfs/go-ipfs:v0.10.0
@@ -83,6 +64,7 @@ jobs:
         env:
           POSTGRES_PASSWORD: postgres
           POSTGRES_DB: graph_node_test
+          POSTGRES_INITDB_ARGS: "-E UTF8 --locale=C"
         options: >-
           --health-cmd pg_isready
           --health-interval 10s
@@ -90,33 +72,20 @@ jobs:
           --health-retries 5
         ports:
           - 5432:5432
+    env:
+      RUSTFLAGS: "-C link-arg=-fuse-ld=lld -D warnings"
     steps:
       - name: Checkout sources
         uses: actions/checkout@v2
-
-      - name: Cache cargo registry
-        uses: actions/cache@v2
-        with:
-          path: |
-            ~/.cargo/registry
-            ~/.cargo/git
-          key: test-cargo-${{ hashFiles('**/Cargo.toml') }}
-
-      - name: Install rust toolchain
-        uses: actions-rs/toolchain@v1
-        with:
-          profile: minimal
-          toolchain: ${{ matrix.rust }}
-          override: true
+      - uses: Swatinem/rust-cache@v2

       - name: Install lld
-        run: sudo apt-get install -y lld
+        run: sudo apt-get install -y lld protobuf-compiler

       - name: Run runner tests
         id: runner-tests-1
         uses: actions-rs/cargo@v1
         env:
-          RUSTFLAGS: "-C link-arg=-fuse-ld=lld -D warnings"
           TESTS_GANACHE_HARD_WAIT_SECONDS: "30"
         with:
           command: test
@@ -124,54 +93,38 @@ jobs:
   integration-tests:
     name: Run integration tests
-    strategy:
-      fail-fast: false
-      matrix:
-        rust: ["stable"]
     runs-on: ubuntu-latest
-
+    timeout-minutes: 60
+    env:
+      RUSTFLAGS: "-C link-arg=-fuse-ld=lld -D warnings"
     steps:
       - name: Checkout sources
         uses: actions/checkout@v2
-
-      - name: Cache cargo registry
-        uses: actions/cache@v2
-        with:
-          path: |
-            ~/.cargo/registry
-            ~/.cargo/git
-          key: test-cargo-${{ hashFiles('**/Cargo.toml') }}
+      - uses: Swatinem/rust-cache@v2

       - name: Install Node 14
-        uses: actions/setup-node@v2
+        uses: actions/setup-node@v3
         with:
           node-version: "14"
-
-      - name: Install rust toolchain
-        uses: actions-rs/toolchain@v1
-        with:
-          profile: minimal
-          toolchain: ${{ matrix.rust }}
-          override: true
+          cache: yarn
+          cache-dependency-path: "tests/integration-tests/yarn.lock"

       - name: Install lld and jq
-        run: sudo apt-get install -y lld jq
+        run: sudo apt-get install -y lld jq protobuf-compiler

       - name: Build graph-node
-        env:
-          RUSTFLAGS: "-C link-arg=-fuse-ld=lld -D warnings"
         uses: actions-rs/cargo@v1
         with:
           command: build
+          args: --bin graph-node

       # Integration tests are a bit flaky, running them twice increases the
-      # chances of one run succeeding
+      # chances of one run succeeding.
       - name: Run integration tests (round 1)
         id: integration-tests-1
         uses: actions-rs/cargo@v1
         env:
-          RUSTFLAGS: "-C link-arg=-fuse-ld=lld -D warnings"
-          N_CONCURRENT_TESTS: "1"
+          N_CONCURRENT_TESTS: "4"
           TESTS_GANACHE_HARD_WAIT_SECONDS: "30"
         with:
           command: test
@@ -182,8 +135,7 @@ jobs:
         uses: actions-rs/cargo@v1
         if: ${{ steps.integration-tests-1.outcome == 'failure' }}
         env:
-          RUSTFLAGS: "-C link-arg=-fuse-ld=lld -D warnings"
-          N_CONCURRENT_TESTS: "1"
+          N_CONCURRENT_TESTS: "4"
           TESTS_GANACHE_HARD_WAIT_SECONDS: "30"
         with:
           command: test
@@ -191,50 +143,28 @@ jobs:
   rustfmt:
     name: Check rustfmt style
-    strategy:
-      matrix:
-        rust: ["stable"]
     runs-on: ubuntu-latest
+    timeout-minutes: 10
+    env:
+      RUSTFLAGS: "-D warnings"
     steps:
       - uses: actions/checkout@v2
-      - uses: actions-rs/toolchain@v1
-        with:
-          profile: minimal
-          toolchain: ${{ matrix.rust }}
-          components: rustfmt
-          override: true

       - name: Check formatting
         uses: actions-rs/cargo@v1
-        env:
-          RUSTFLAGS: "-D warnings"
         with:
           command: fmt
           args: --all -- --check

   clippy:
     name: Report Clippy warnings
-    strategy:
-      matrix:
-        rust: ["stable"]
     runs-on: ubuntu-latest
+    timeout-minutes: 60
     steps:
       - uses: actions/checkout@v2
-      - uses: actions-rs/toolchain@v1
-        with:
-          profile: minimal
-          toolchain: ${{ matrix.rust }}
-          components: clippy
-          override: true

       # Unlike rustfmt, Clippy actually compiles stuff so it benefits from
       # caching.
-      - name: Cache cargo registry
-        uses: actions/cache@v2
-        with:
-          path: |
-            ~/.cargo/registry
-            ~/.cargo/git
-          key: check-cargo-${{ hashFiles('**/Cargo.toml') }}
+      - uses: Swatinem/rust-cache@v2

       - name: Run Clippy
         uses: actions-rs/cargo@v1
@@ -243,59 +173,23 @@ jobs:
         continue-on-error: true
         with:
           command: clippy
+          args: --no-deps

   release-check:
     name: Build in release mode
-    strategy:
-      matrix:
-        rust: ["stable"]
     runs-on: ubuntu-latest
+    timeout-minutes: 60
+    env:
+      RUSTFLAGS: "-D warnings"
     steps:
       - uses: actions/checkout@v2
-      - uses: actions-rs/toolchain@v1
-        with:
-          profile: minimal
-          toolchain: ${{ matrix.rust }}
-          override: true
-      - name: Cache cargo registry
-        uses: actions/cache@v2
-        with:
-          path: |
-            ~/.cargo/registry
-            ~/.cargo/git
-          key: check-cargo-${{ hashFiles('**/Cargo.toml') }}
-
+      - uses: Swatinem/rust-cache@v2
       - name: Install dependencies
         run: |
           sudo apt-get update
-          sudo apt-get -y install libpq-dev
-
-      - name: Cargo check (debug)
-        uses: actions-rs/cargo@v1
-        env:
-          RUSTFLAGS: "-D warnings"
-        with:
-          command: check
-          args: --tests
-
+          sudo apt-get -y install libpq-dev protobuf-compiler

       - name: Cargo check (release)
-        env:
-          RUSTFLAGS: "-D warnings"
         uses: actions-rs/cargo@v1
         with:
           command: check
           args: --release
-
-  version-check:
-    name: Check that all graph-node crates have the same version
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v2
-      - name: Checks through all Cargo.toml files, making sure their version is unique
-        run: |
-          source 'scripts/toml-utils.sh'
-
-          ALL_TOML_FILE_NAMES=$(get_all_toml_files)
-          ALL_TOML_VERSIONS=$(get_all_toml_versions $ALL_TOML_FILE_NAMES)
-
-          ./scripts/lines-unique.sh $ALL_TOML_VERSIONS
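A recurring change across every job in ci.yml: the hand-rolled `actions/cache` + `actions-rs/toolchain` pairing is dropped in favor of a single `Swatinem/rust-cache@v2` step, with jobs relying on the runner's preinstalled stable toolchain. Roughly the before/after for one job, job context elided:

```yaml
# Before: cache keyed only on Cargo.toml hashes, toolchain installed
# explicitly via actions-rs/toolchain.
- uses: actions/cache@v2
  with:
    path: |
      ~/.cargo/registry
      ~/.cargo/git
    key: test-cargo-${{ hashFiles('**/Cargo.toml') }}

# After: one step. rust-cache derives its cache key from the compiler
# version and the lockfile, and caches ./target as well.
- uses: Swatinem/rust-cache@v2
```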
diff --git a/.github/workflows/code-coverage.yml b/.github/workflows/code-coverage.yml
new file mode 100644
index 00000000000..2f476c797cb
--- /dev/null
+++ b/.github/workflows/code-coverage.yml
@@ -0,0 +1,74 @@
+name: Code coverage
+
+on:
+  workflow_dispatch:
+  schedule:
+    # Run it every day at 03:00 UTC.
+    - cron: "0 3 * * *"
+
+env:
+  CARGO_TERM_COLOR: always
+  RUST_BACKTRACE: full
+  THEGRAPH_STORE_POSTGRES_DIESEL_URL: "postgresql://postgres:postgres@localhost:5432/graph_node_test"
+  RUSTFLAGS: "-C link-arg=-fuse-ld=lld -D warnings"
+  N_CONCURRENT_TESTS: "4"
+  TESTS_GANACHE_HARD_WAIT_SECONDS: "30"
+
+jobs:
+  # Heavily inspired from .
+  coverage:
+    name: Code coverage of integration tests
+    runs-on: ubuntu-latest
+    timeout-minutes: 60
+    services:
+      ipfs:
+        image: ipfs/go-ipfs:v0.10.0
+        ports:
+          - 5001:5001
+      postgres:
+        image: postgres
+        env:
+          POSTGRES_PASSWORD: postgres
+          POSTGRES_DB: graph_node_test
+          POSTGRES_INITDB_ARGS: "-E UTF8 --locale=C"
+        options: >-
+          --health-cmd pg_isready
+          --health-interval 10s
+          --health-timeout 5s
+          --health-retries 5
+        ports:
+          - 5432:5432
+    steps:
+      - uses: actions/checkout@v3
+      - uses: Swatinem/rust-cache@v2
+      - name: Install Node 14
+        uses: actions/setup-node@v3
+        with:
+          node-version: "14"
+          cache: yarn
+          cache-dependency-path: "tests/integration-tests/yarn.lock"
+      - name: Install lld and jq
+        run: sudo apt-get install -y lld jq
+      - uses: actions-rs/cargo@v1
+        with:
+          command: install
+          args: cargo-llvm-cov
+
+      - name: Build graph-node
+        uses: actions-rs/cargo@v1
+        with:
+          command: build
+          args: --bin graph-node
+
+      - name: Generate code coverage
+        run: cargo llvm-cov --package graph-tests --lcov --output-path lcov.info -- --nocapture
+      - uses: actions/upload-artifact@v3
+        with:
+          name: code-coverage-info
+          path: lcov.info
+      - name: Upload coverage to Codecov
+        uses: codecov/codecov-action@v3
+        with:
+          # No token needed, because the repo is public.
+          files: lcov.info
+          fail_ci_if_error: true
diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml
new file mode 100644
index 00000000000..69256e24ebf
--- /dev/null
+++ b/.github/workflows/stale.yml
@@ -0,0 +1,35 @@
+name: Stale PR handler
+
+permissions:
+  contents: write
+  issues: write
+  pull-requests: write
+
+on:
+  workflow_dispatch:
+  schedule:
+    # Run it once a day.
+    - cron: "0 0 * * *"
+
+jobs:
+  stale:
+    runs-on: ubuntu-latest
+    timeout-minutes: 20
+    steps:
+      - uses: actions/stale@main
+        id: stale
+        with:
+          # PRs
+          days-before-pr-stale: 90
+          days-before-pr-close: 14
+          stale-pr-message: >
+            This pull request hasn't had any activity for the last 90 days. If
+            there's no more activity over the course of the next 14 days, it will
+            automatically be closed.
+          # Issues
+          days-before-issue-stale: 180
+          # Never close stale issues, only mark them as such.
+          days-before-issue-close: -1
+          stale-issue-message: >
+            Looks like this issue has been open for 6 months with no activity.
+            Is it still relevant? If not, please remember to close it.
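The `*.profraw` and `lcov.info` entries added to .gitignore below are the local byproducts of the coverage job above. A sketch of the same run condensed into a single hypothetical step, using plain `run:` shell instead of actions-rs/cargo (same commands the workflow issues):

```yaml
- name: Generate coverage
  run: |
    cargo install cargo-llvm-cov
    cargo build --bin graph-node
    # Writes lcov.info (and leaves *.profraw files behind), hence the
    # new .gitignore entries.
    cargo llvm-cov --package graph-tests --lcov --output-path lcov.info -- --nocapture
```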
diff --git a/.gitignore b/.gitignore
index 874a81467c3..bf084b6c226 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,7 @@
+# Code coverage stuff
+*.profraw
+lcov.info
+
 # Generated by Cargo
 # will have compiled files and executables
 /target/
@@ -14,13 +18,13 @@
 /docker/data/
 /docker/parity/chains/
 /docker/parity/network/
+**/*/tests/fixtures/ipfs_folder/random.txt

-/tests/integration-tests/**/build
-/tests/integration-tests/**/generated
-/tests/integration-tests/**/node_modules
-/tests/integration-tests/**/yarn.lock
-/tests/integration-tests/**/yarn-error.log
+/tests/**/build
+/tests/**/generated
+/tests/**/node_modules
+/tests/**/yarn-error.log

 # Built solidity contracts.
-/tests/integration-tests/**/bin
-/tests/integration-tests/**/truffle_output
+/tests/**/bin
+/tests/**/truffle_output
diff --git a/Cargo.lock b/Cargo.lock
index 0ea2c3b8d44..690bbc33723 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -47,27 +47,18 @@ dependencies = [
 [[package]]
 name = "android_system_properties"
-version = "0.1.4"
+version = "0.1.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d7ed72e1635e121ca3e79420540282af22da58be50de153d36f81ddc6b83aa9e"
+checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311"
 dependencies = [
  "libc",
 ]

-[[package]]
-name = "ansi_term"
-version = "0.12.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2"
-dependencies = [
- "winapi",
-]
-
 [[package]]
 name = "anyhow"
-version = "1.0.65"
+version = "1.0.69"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "98161a4e3e2184da77bb14f02184cdd111e83bbbcc9979dfee3c44b9a85f5602"
+checksum = "224afbd727c3d6e4b90103ece64b8d1b67fbb1973b1046c2281eed3f3803f800"

 [[package]]
 name = "arc-swap"
@@ -99,6 +90,16 @@
 version = "0.9.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "eab1c04a571841102f5345a8fc0f6bb3d31c315dec879b5c6e42e40ce7ffa34e"

+[[package]]
+name = "assert-json-diff"
+version = "2.0.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "47e4f2b81832e72834d7518d8487a0396a28cc408186a2e8854c0f98011faf12"
+dependencies = [
+ "serde",
+ "serde_json",
+]
+
 [[package]]
 name = "async-recursion"
 version = "1.0.0"
@@ -144,9 +145,9 @@ dependencies = [
 [[package]]
 name = "atomic_refcell"
-version = "0.1.8"
+version = "0.1.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "73b5e5f48b927f04e952dedc932f31995a65a0bf65ec971c74436e51bf6e970d"
+checksum = "857253367827bd9d0fd973f0ef15506a96e79e41b0ad7aa691203a4e3214f6c8"

 [[package]]
 name = "atty"
 version = "0.2.14"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8"
 dependencies = [
- "hermit-abi",
+ "hermit-abi 0.1.19",
  "libc",
  "winapi",
 ]
@@ -167,9 +168,9 @@ checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a"
 [[package]]
 name = "axum"
-version = "0.5.4"
+version = "0.6.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f4af7447fc1214c1f3a1ace861d0216a6c8bb13965b64bbad9650f375b67689a"
+checksum = "08b108ad2665fa3f6e6a517c3d80ec3e77d224c47d605167aefaa5d7ef97fa48"
 dependencies = [
  "async-trait",
  "axum-core",
@@ -185,20 +186,20 @@ dependencies = [
  "mime",
  "percent-encoding",
  "pin-project-lite",
+ "rustversion",
  "serde",
  "sync_wrapper",
- "tokio",
- "tower 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tower 0.4.13",
  "tower-http",
- "tower-layer 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tower-layer 0.3.2",
  "tower-service 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
 ]

 [[package]]
 name = "axum-core"
-version = "0.2.3"
+version = "0.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3bdc19781b16e32f8a7200368a336fa4509d4b72ef15dd4e41df5290855ee1e6"
+checksum = "79b8558f5a0581152dc94dcd289132a1d377494bdeafcd41869b3258e3e2ad92"
 dependencies = [
  "async-trait",
  "bytes",
@@ -206,6 +207,9 @@ dependencies = [
  "http",
  "http-body",
  "mime",
+ "rustversion",
+ "tower-layer 0.3.2",
+ "tower-service 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
 ]

 [[package]]
@@ -231,9 +235,15 @@ checksum = "4cbbc9d0964165b47557570cce6c952866c2678457aca742aafc9fb771d30270"
 [[package]]
 name = "base64"
-version = "0.13.0"
+version = "0.13.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd"
+checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8"
+
+[[package]]
+name = "base64"
+version = "0.20.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0ea22880d78093b0cbe17c89f64a7d457941e65759157ec6cb31a31d652b05e5"

 [[package]]
 name = "base64-url"
 version = "1.4.13"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "67a99c239d0c7e77c85dddfa9cebce48704b3c49550fcd3b84dd637e4484899f"
 dependencies = [
- "base64",
+ "base64 0.13.1",
 ]

+[[package]]
+name = "beef"
+version = "0.5.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3a8241f3ebb85c056b509d4327ad0358fbbba6ffb340bf388f26350aeda225b1"
+dependencies = [
+ "serde",
+]
+
 [[package]]
@@ -291,7 +310,7 @@ checksum = "72936ee4afc7f8f736d1c38383b56480b5497b4617b4a77bdbf1d2ababc76127"
 dependencies = [
  "arrayref",
  "arrayvec 0.7.2",
- "constant_time_eq",
+ "constant_time_eq 0.1.5",
 ]
@@ -302,7 +321,7 @@ checksum = "db539cc2b5f6003621f1cd9ef92d7ded8ea5232c7de0f9faa2de251cd98730d4"
 dependencies = [
  "arrayref",
  "arrayvec 0.7.2",
- "constant_time_eq",
+ "constant_time_eq 0.1.5",
 ]
@@ -315,23 +334,23 @@ dependencies = [
  "arrayvec 0.5.2",
  "cc",
  "cfg-if 0.1.10",
- "constant_time_eq",
+ "constant_time_eq 0.1.5",
  "crypto-mac 0.8.0",
  "digest 0.9.0",
 ]

 [[package]]
 name = "blake3"
-version = "1.3.1"
+version = "1.3.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a08e53fc5a564bb15bfe6fae56bd71522205f1f91893f9c0116edad6496c183f"
+checksum = "42ae2468a89544a466886840aa467a25b766499f4f04bf7d9fcd10ecee9fccef"
 dependencies = [
  "arrayref",
  "arrayvec 0.7.2",
  "cc",
  "cfg-if 1.0.0",
- "constant_time_eq",
- "digest 0.10.3",
+ "constant_time_eq 0.2.4",
+ "digest 0.10.5",
 ]

 [[package]]
@@ -358,7 +377,7 @@
 version = "0.10.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "699194c00f3a2effd3358d47f880646818e3d483190b17ebcdf598c654fb77e9"
 dependencies = [
- "base64",
+ "base64 0.13.1",
  "bollard-stubs",
  "bytes",
  "chrono",
@@ -411,9 +430,9 @@ dependencies = [
 [[package]]
 name = "bumpalo"
-version = "3.7.0"
+version = "3.12.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9c59e7af012c713f529e7a3ee57ce9b31ddd858d4b512923602f74608b009631"
+checksum = "0d261e256854913907f67ed06efbc3338dfe6179796deefc1ff763fc1aee5535"

 [[package]]
 name = "byte-slice-cast"
 version = "0.10.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610"

 [[package]]
 name = "bytes"
-version = "1.0.1"
+version = "1.2.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b700ce4376041dcd0a327fd0097c41095743c4c8af8887265942faf1100bd040"
+checksum = "ec8a7b6a70fde80372154c65702f00a0f56f3e1c36abbc6c440484be248856db"

 [[package]]
 name = "cc"
@@ -456,62 +475,61 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
 [[package]]
 name = "chrono"
-version = "0.4.22"
+version = "0.4.23"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bfd4d1b31faaa3a89d7934dbded3111da0d2ef28e3ebccdb4f0179f5929d1ef1"
+checksum = "16b0a3d9ed01224b22057780a37bb8c5dbfe1be8ba48678e7bf57ec4b385411f"
 dependencies = [
  "iana-time-zone",
  "js-sys",
  "num-integer",
  "num-traits",
  "serde",
- "time",
+ "time 0.1.44",
  "wasm-bindgen",
  "winapi",
 ]

 [[package]]
 name = "cid"
-version = "0.8.6"
+version = "0.10.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f6ed9c8b2d17acb8110c46f1da5bf4a696d745e1474a16db0cd2b49cd0249bf2"
+checksum = "fd94671561e36e4e7de75f753f577edafb0e7c05d6e4547229fdf7938fbcd2c3"
 dependencies = [
  "core2",
  "multibase",
- "multihash",
+ "multihash 0.18.0",
  "serde",
  "unsigned-varint",
 ]

 [[package]]
 name = "clap"
-version = "2.34.0"
+version = "3.2.23"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c"
+checksum = "71655c45cb9845d3270c9d6df84ebe72b4dad3c2ba3f7023ad47c144e4e473a5"
 dependencies = [
- "ansi_term",
  "atty",
  "bitflags",
- "strsim 0.8.0",
- "term_size",
- "textwrap 0.11.0",
- "unicode-width",
- "vec_map",
+ "clap_derive",
+ "clap_lex",
+ "indexmap",
+ "once_cell",
+ "strsim",
+ "termcolor",
+ "textwrap",
 ]

 [[package]]
-name = "clap"
-version = "3.2.21"
+name = "clap_derive"
+version = "3.2.18"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1ed5341b2301a26ab80be5cbdced622e80ed808483c52e45e3310a877d3b37d7"
+checksum = "ea0c8bce528c4be4da13ea6fead8965e95b6073585a2f05204bd8f4119f82a65"
 dependencies = [
- "atty",
- "bitflags",
- "clap_lex",
- "indexmap",
- "strsim 0.10.0",
- "termcolor",
- "textwrap 0.15.0",
+ "heck 0.4.1",
+ "proc-macro-error",
+ "proc-macro2",
+ "quote",
+ "syn",
 ]

 [[package]]
@@ -523,15 +541,6 @@ dependencies = [
  "os_str_bytes",
 ]

-[[package]]
-name = "cmake"
-version = "0.1.46"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b7b858541263efe664aead4a5209a4ae5c5d2811167d4ed4ee0944503f8d2089"
-dependencies = [
- "cc",
-]
-
 [[package]]
 name = "combine"
 version = "3.8.1"
@@ -545,6 +554,22 @@ dependencies = [
  "unreachable",
 ]

+[[package]]
+name = "common-multipart-rfc7578"
+version = "0.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5baee326bc603965b0f26583e1ecd7c111c41b49bd92a344897476a352798869"
+dependencies = [
+ "bytes",
+ "futures-core",
+ "futures-util",
+ "http",
+ "mime",
+ "mime_guess",
+ "rand",
+ "thiserror",
+]
+
 [[package]]
 name = "console"
 version = "0.13.0"
@@ -573,6 +598,12 @@
 version = "0.1.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc"

+[[package]]
+name = "constant_time_eq"
+version = "0.2.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f3ad85c1f65dc7b37604eb0e89748faf0b9653065f2a8ef69f96a687ec1e9279"
+
 [[package]]
 name = "convert_case"
 version = "0.4.0"
@@ -877,7 +908,7 @@ dependencies = [
  "ident_case",
  "proc-macro2",
  "quote",
- "strsim 0.10.0",
+ "strsim",
  "syn",
 ]
@@ -962,7 +993,7 @@
 version = "1.1.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "6c8910921b014e2af16298f006de12aa08af894b71f0f49a486ab6d74b17bbed"
 dependencies = [
- "heck 0.4.0",
+ "heck 0.4.1",
  "proc-macro2",
  "quote",
  "syn",
 ]
@@ -1020,9 +1051,9 @@ dependencies = [
 [[package]]
 name = "digest"
-version = "0.10.3"
+version = "0.10.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f2fb860ca6fafa5552fb6d0e816a69c8e49f0908bf524e30a90d97c85892d506"
+checksum = "adfbc57365a37acbd2ebf2b64d7e69bb766e2fea813521ed536f5d0520dcf86c"
 dependencies = [
  "block-buffer 0.10.2",
  "crypto-common",
@@ -1039,6 +1070,15 @@ dependencies = [
  "dirs-sys-next",
 ]

+[[package]]
+name = "dirs"
+version = "4.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ca3aa72a6f96ea37bbc5aa912f6788242832f75369bdfdadcb0e38423f100059"
+dependencies = [
+ "dirs-sys",
+]
+
 [[package]]
 name = "dirs-next"
 version = "2.0.0"
@@ -1049,6 +1089,17 @@ dependencies = [
  "dirs-sys-next",
 ]

+[[package]]
+name = "dirs-sys"
+version = "0.3.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1b1d1d91c932ef41c0f2663aa8b0ca0342d444d842c06914aa0a7e352d0bada6"
+dependencies = [
+ "libc",
+ "redox_users",
+ "winapi",
+]
+
 [[package]]
 name = "dirs-sys-next"
 version = "0.1.2"
@@ -1096,9 +1147,9 @@ dependencies = [
 [[package]]
 name = "env_logger"
-version = "0.9.0"
+version = "0.9.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0b2cf0344971ee6c64c31be0d530793fba457d322dfec2810c453d0ef228f9c3"
+checksum = "a12e6657c4c97ebab115a42dcee77225f7f482cdd841cf7088c657a42e9e00e7"
 dependencies = [
  "atty",
  "humantime 2.1.0",
@@ -1240,12 +1291,12 @@ checksum = "398ea4fabe40b9b0d885340a2a991a44c8a645624075ad966d21f88688e2b69e"
 [[package]]
 name = "flate2"
-version = "1.0.24"
+version = "1.0.25"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f82b0f4c27ad9f8bfd1f3208d882da2b09c301bc1c828fd3a00d0216d2fbbff6"
+checksum = "a8a2db397cb1c8772f31494cb8917e48cd1e64f0fa7efac59fbd741a0a8ce841"
 dependencies = [
  "crc32fast",
- "miniz_oxide 0.5.3",
+ "miniz_oxide 0.6.2",
 ]
@@ -1271,11 +1322,10 @@ checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b"
 [[package]]
 name = "form_urlencoded"
-version = "1.0.1"
+version = "1.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5fc25a87fa4fd2094bffb06925852034d90a17f0d1e05197d4956d3555752191"
+checksum = "a9c384f161156f5260c24a097c56119f9be8c798586aecc13afbcbe7b7e26bf8"
 dependencies = [
- "matches",
  "percent-encoding",
 ]
@@ -1438,9 +1488,9 @@ checksum = "f0a01e0497841a3b2db4f8afa483cce65f7e96a3498bd6c541734792aeac8fe7"
 [[package]]
 name = "git-testament"
-version = "0.2.0"
+version = "0.2.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "096cb9c8aa6f1924d079bf417f1d1685286958ff362fa58ae4d65a53ffec6c02"
+checksum = "986bf57c808270f3a0a0652c3bfce0f5d667aa5f5b465616dc697c7f390834b1"
 dependencies = [
  "git-testament-derive",
  "no-std-compat",
 ]

 [[package]]
 name = "git-testament-derive"
-version = "0.1.12"
+version = "0.1.14"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "45ceded7b01141664c3fc4a50199c408a6ed247e6c8415dc005e895f1d233374"
+checksum = "4a782db5866c7ab75f3552dda4cbf34e3e257cc64c963c6ed5af1e12818e8ae6"
 dependencies = [
- "chrono",
  "log",
  "proc-macro2",
  "quote",
  "syn",
+ "time 0.3.17",
 ]

 [[package]]
@@ -1474,7 +1524,7 @@ dependencies = [
 [[package]]
 name = "graph"
-version = "0.27.0"
+version = "0.29.0"
 dependencies = [
  "Inflector",
  "anyhow",
@@ -1485,6 +1535,7 @@ dependencies = [
  "bytes",
  "chrono",
  "cid",
+ "clap",
  "diesel",
  "diesel_derives",
  "envconfig",
@@ -1508,6 +1559,7 @@ dependencies = [
  "prost",
  "prost-types",
  "rand",
+ "regex",
  "reqwest",
  "semver",
  "serde",
@@ -1521,7 +1573,6 @@ dependencies = [
  "slog-term",
  "stable-hash 0.3.3",
  "stable-hash 0.4.2",
- "structopt",
  "strum",
  "strum_macros",
  "test-store",
@@ -1539,7 +1590,7 @@ dependencies = [
 [[package]]
 name = "graph-chain-arweave"
-version = "0.27.0"
+version = "0.29.0"
 dependencies = [
  "base64-url",
  "diesel",
@@ -1549,23 +1600,23 @@ dependencies = [
  "prost",
  "prost-types",
  "serde",
- "sha2 0.10.5",
+ "sha2 0.10.6",
  "tonic-build",
 ]

 [[package]]
 name = "graph-chain-common"
-version = "0.27.0"
+version = "0.29.0"
 dependencies = [
  "anyhow",
- "heck 0.4.0",
- "protobuf 3.1.0",
+ "heck 0.4.1",
+ "protobuf 3.2.0",
  "protobuf-parse",
 ]

 [[package]]
 name = "graph-chain-cosmos"
-version = "0.27.0"
+version = "0.29.0"
 dependencies = [
  "anyhow",
  "graph",
@@ -1581,14 +1632,15 @@ dependencies = [
 [[package]]
 name = "graph-chain-ethereum"
-version = "0.27.0"
+version = "0.29.0"
 dependencies = [
  "anyhow",
- "base64",
+ "base64 0.20.0",
  "dirs-next",
  "envconfig",
  "futures 0.1.31",
  "graph",
+ "graph-mock",
  "graph-runtime-derive",
  "graph-runtime-wasm",
  "hex",
@@ -1607,9 +1659,9 @@ dependencies = [
 [[package]]
 name = "graph-chain-near"
-version = "0.27.0"
+version = "0.29.0"
 dependencies = [
- "base64",
+ "base64 0.20.0",
  "diesel",
  "graph",
  "graph-runtime-derive",
@@ -1622,10 +1674,11 @@ dependencies = [
 [[package]]
 name = "graph-chain-substreams"
-version = "0.27.0"
+version = "0.29.0"
 dependencies = [
  "anyhow",
  "async-stream",
+ "base64 0.20.0",
  "dirs-next",
  "envconfig",
  "futures 0.1.31",
@@ -1648,7 +1701,7 @@ dependencies = [
 [[package]]
 name = "graph-core"
-version = "0.27.0"
+version = "0.29.0"
 dependencies = [
  "anyhow",
  "async-stream",
@@ -1668,6 +1721,8 @@ dependencies = [
  "graph-runtime-wasm",
  "graphql-parser",
  "hex",
+ "ipfs-api",
+ "ipfs-api-backend-hyper",
  "lazy_static",
  "lru_time_cache",
  "pretty_assertions",
@@ -1676,13 +1731,14 @@ dependencies = [
  "serde_json",
  "serde_yaml",
  "test-store",
- "tower 0.4.12 (git+https://github.com/tower-rs/tower.git)",
+ "tower 0.4.12",
  "tower-test",
+ "uuid",
 ]

 [[package]]
 name = "graph-graphql"
-version = "0.27.0"
+version = "0.29.0"
 dependencies = [
  "Inflector",
  "anyhow",
@@ -1704,19 +1760,19 @@ dependencies = [
 [[package]]
 name = "graph-mock"
-version = "0.27.0"
+version = "0.29.0"
 dependencies = [
  "graph",
 ]

 [[package]]
 name = "graph-node"
-version = "0.27.0"
+version = "0.29.0"
 dependencies = [
- "clap 3.2.21",
+ "clap",
  "crossbeam-channel",
  "diesel",
- "env_logger 0.9.0",
+ "env_logger 0.9.3",
  "futures 0.3.16",
  "git-testament",
  "graph",
@@ -1739,20 +1795,19 @@ dependencies = [
  "json-structural-diff",
  "lazy_static",
  "prometheus",
- "regex",
  "serde",
  "serde_regex",
  "shellexpand",
- "structopt",
- "toml",
+ "termcolor",
+ "toml 0.7.1",
  "url",
 ]

 [[package]]
 name = "graph-runtime-derive"
-version = "0.27.0"
+version = "0.29.0"
 dependencies = [
- "heck 0.4.0",
+ "heck 0.4.1",
  "proc-macro2",
  "quote",
  "syn",
@@ -1760,7 +1815,7 @@
 [[package]]
 name = "graph-runtime-test"
-version = "0.27.0"
+version = "0.29.0"
 dependencies = [
  "graph",
  "graph-chain-ethereum",
@@ -1776,7 +1831,7 @@ dependencies = [
 [[package]]
 name = "graph-runtime-wasm"
-version = "0.27.0"
+version = "0.29.0"
 dependencies = [
  "anyhow",
  "async-trait",
@@ -1795,14 +1850,14 @@ dependencies = [
  "semver",
  "strum",
  "strum_macros",
- "uuid 1.1.2",
+ "uuid",
  "wasm-instrument",
  "wasmtime",
 ]

 [[package]]
 name = "graph-server-http"
-version = "0.27.0"
+version = "0.29.0"
 dependencies = [
  "futures 0.1.31",
  "graph",
@@ -1816,9 +1871,9 @@ dependencies = [
 [[package]]
 name = "graph-server-index-node"
-version = "0.27.0"
+version = "0.29.0"
 dependencies = [
- "blake3 1.3.1",
+ "blake3 1.3.3",
  "either",
  "futures 0.3.16",
  "graph",
@@ -1836,28 +1891,24 @@ dependencies = [
 [[package]]
 name = "graph-server-json-rpc"
-version = "0.27.0"
+version = "0.29.0"
 dependencies = [
  "graph",
- "jsonrpc-http-server",
- "lazy_static",
+ "jsonrpsee",
  "serde",
 ]

 [[package]]
 name = "graph-server-metrics"
-version = "0.27.0"
+version = "0.29.0"
 dependencies = [
  "graph",
- "http",
  "hyper",
- "lazy_static",
- "serde",
 ]

 [[package]]
 name = "graph-server-websocket"
-version = "0.27.0"
+version = "0.29.0"
 dependencies = [
  "anyhow",
  "futures 0.1.31",
@@ -1868,18 +1919,18 @@ dependencies = [
  "serde",
  "serde_derive",
  "tokio-tungstenite",
- "uuid 0.8.2",
+ "uuid",
 ]

 [[package]]
 name = "graph-store-postgres"
-version = "0.27.0"
+version = "0.29.0"
 dependencies = [
  "Inflector",
  "anyhow",
  "async-trait",
- "blake3 1.3.1",
- "clap 3.2.21",
+ "blake3 1.3.3",
+ "clap",
  "derive_more",
  "diesel",
  "diesel-derive-enum",
@@ -1904,18 +1955,20 @@ dependencies = [
  "pin-utils",
  "postgres",
  "postgres-openssl",
+ "pretty_assertions",
  "rand",
  "serde",
  "stable-hash 0.3.3",
  "test-store",
- "uuid 1.1.2",
+ "uuid",
 ]

 [[package]]
 name = "graph-tests"
-version = "0.27.0"
+version = "0.29.0"
 dependencies = [
  "anyhow",
+ "assert-json-diff",
  "async-stream",
  "bollard",
  "cid",
@@ -1927,15 +1980,18 @@ dependencies = [
  "graph-graphql",
  "graph-mock",
  "graph-node",
+ "graph-runtime-wasm",
+ "graph-server-index-node",
  "graph-store-postgres",
  "graphql-parser",
- "hex",
+ "hyper",
  "lazy_static",
- "port_check",
+ "serde",
  "serde_yaml",
  "slog",
  "tokio",
  "tokio-stream",
+ "uuid",
 ]

 [[package]]
@@ -1950,9 +2006,9 @@ dependencies = [
 [[package]]
 name = "graphql-tools"
-version = "0.2.0"
+version = "0.2.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1a71c3ac880d8383537914ea7b45d99c6946e15976faa33a42b6c339ef4a2fb8"
+checksum = "7bc3a979aca9d796ff03ff71f4013e203a1f69bf1f37899ae4a8e676bb236608"
 dependencies = [
  "graphql-parser",
  "lazy_static",
@@ -1985,20 +2041,30 @@
 version = "0.12.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "db0d4cf898abf0081f964436dc980e96670a0f36863e4b83aaacdb65c9d7ccc3"

+[[package]]
+name = "hdrhistogram"
+version = "7.5.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7f19b9f54f7c7f55e31401bb647626ce0cf0f67b0004982ce815b3ee72a02aa8"
+dependencies = [
+ "byteorder",
+ "num-traits",
+]
+
 [[package]]
 name = "headers"
 version = "0.3.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "a4c4eb0471fcb85846d8b0690695ef354f9afb11cb03cac2e1d7c9253351afb0"
 dependencies = [
- "base64",
+ "base64 0.13.1",
  "bitflags",
  "bytes",
  "headers-core",
  "http",
  "httpdate",
  "mime",
- "sha-1",
+ "sha-1 0.9.7",
 ]

 [[package]]
@@ -2021,9 +2087,9 @@ dependencies = [
 [[package]]
 name = "heck"
-version = "0.4.0"
+version = "0.4.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2540771e65fc8cb83cd6e8a237f70c319bd5c29f78ed1084ba5d50eeac86f7f9"
+checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8"

 [[package]]
 name = "hermit-abi"
@@ -2034,6 +2100,15 @@ dependencies = [
  "libc",
 ]

+[[package]]
+name = "hermit-abi"
+version = "0.2.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ee512640fe35acbfb4bb779db6f0d80704c2cacfa2e39b601ef3e3f47d1ae4c7"
+dependencies = [
+ "libc",
+]
+
 [[package]]
 name = "hex"
 version = "0.4.3"
@@ -2058,20 +2133,20 @@ dependencies = [
 [[package]]
 name = "http"
-version = "0.2.5"
+version = "0.2.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1323096b05d41827dadeaee54c9981958c0f94e670bc94ed80037d1a7b8b186b"
+checksum = "75f43d41e26995c17e71ee126451dd3941010b0514a81a9d11f3b341debc2399"
 dependencies = [
  "bytes",
  "fnv",
- "itoa 0.4.7",
+ "itoa 1.0.1",
 ]

 [[package]]
 name = "http-body"
-version = "0.4.4"
+version = "0.4.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1ff4f84919677303da5f147645dbea6b1881f368d03ac84e1dc09031ebd7b2c6"
+checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1"
 dependencies = [
  "bytes",
  "http",
@@ -2135,6 +2210,34 @@ dependencies = [
  "want",
 ]

+[[package]]
+name = "hyper-multipart-rfc7578"
+version = "0.8.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f0eb2cf73e96e9925f4bed948e763aa2901c2f1a3a5f713ee41917433ced6671"
+dependencies = [
+ "bytes",
+ "common-multipart-rfc7578",
+ "futures-core",
+ "http",
+ "hyper",
+]
+
+[[package]]
+name = "hyper-rustls"
+version = "0.23.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d87c48c02e0dc5e3b849a2041db3029fd066650f8f717c07bf8ed78ccb895cac"
+dependencies = [
+ "http",
+ "hyper",
+ "log",
+ "rustls",
+ "rustls-native-certs",
+ "tokio",
+ "tokio-rustls",
+]
+
 [[package]]
 name = "hyper-timeout"
 version = "0.4.1"
@@ -2175,13 +2278,14 @@ dependencies = [
 [[package]]
 name = "iana-time-zone"
-version = "0.1.44"
+version = "0.1.47"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "808cf7d67cf4a22adc5be66e75ebdf769b3f2ea032041437a7061f97a63dad4b"
+checksum = "4c495f162af0bf17656d0014a0eded5f3cd2f365fdd204548c2869db89359dc7"
 dependencies = [
  "android_system_properties",
  "core-foundation-sys",
  "js-sys",
+ "once_cell",
  "wasm-bindgen",
  "winapi",
 ]
@@ -2216,6 +2320,16 @@ dependencies = [
  "unicode-normalization",
 ]

+[[package]]
+name = "idna"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e14ddfc70884202db2244c223200c204c2bda1bc6e0998d11b5e024d657209e6"
+dependencies = [
+ "unicode-bidi",
+ "unicode-normalization",
+]
+
 [[package]]
 name = "impl-codec"
 version = "0.6.0"
@@ -2256,9 +2370,9 @@ dependencies = [
 [[package]]
 name = "indexmap"
-version = "1.9.1"
+version = "1.9.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "10a35a97730320ffe8e2d410b5d3b69279b98d2c14bdb8b70ea89ecf7888d41e"
+checksum = "1885e79c1fc4b10f0e172c475f458b7f7b93061064d98c3293e98c5ba0c8b399"
 dependencies = [
  "autocfg",
  "hashbrown",
@@ -2266,21 +2380,65 @@
 ]

 [[package]]
-name = "input_buffer"
-version = "0.4.0"
+name = "instant"
+version = "0.1.10"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f97967975f448f1a7ddb12b0bc41069d09ed6a1c161a92687e057325db35d413"
+checksum = "bee0328b1209d157ef001c94dd85b4f8f64139adb0eac2659f4b08382b2f474d"
 dependencies = [
+ "cfg-if 1.0.0",
+]
+
+[[package]]
+name = "ipfs-api"
+version = "0.17.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6d8cc57cf12ae4af611e53dd04053e1cfb815917c51c410aa30399bf377046ab"
+dependencies = [
+ "ipfs-api-backend-hyper",
+]
+
+[[package]]
+name = "ipfs-api-backend-hyper"
+version = "0.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8a9d131b408b4caafe1e7c00d410a09ad3eb7e3ab68690cf668e86904b2176b4"
+dependencies = [
+ "async-trait",
+ "base64 0.13.1",
  "bytes",
+ "futures 0.3.16",
+ "http",
+ "hyper",
+ "hyper-multipart-rfc7578",
+ "hyper-rustls",
+ "ipfs-api-prelude",
+ "thiserror",
 ]

 [[package]]
-name = "instant"
-version = "0.1.10"
+name = "ipfs-api-prelude"
+version = "0.6.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bee0328b1209d157ef001c94dd85b4f8f64139adb0eac2659f4b08382b2f474d"
+checksum = "9b74065805db266ba2c6edbd670b23c4714824a955628472b2e46cc9f3a869cb"
 dependencies = [
+ "async-trait",
+ "bytes",
  "cfg-if 1.0.0",
+ "common-multipart-rfc7578",
+ "dirs",
+ "futures 0.3.16",
+ "http",
+ "multiaddr",
+ "multibase",
+ "serde",
+ "serde_json",
+ "serde_urlencoded",
+ "thiserror",
+ "tokio",
+ "tokio-util 0.7.1",
+ "tracing",
+ "typed-builder",
+ "walkdir",
 ]

 [[package]]
@@ -2303,9 +2461,9 @@ dependencies = [
 [[package]]
 name = "itertools"
-version = "0.10.3"
+version = "0.10.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a9a9d19fa1e79b6215ff29b9d6880b706147f16e9b1dbb1e4e5947b5b02bc5e3"
+checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473"
 dependencies = [
  "either",
 ]
@@ -2368,37 +2526,74 @@ dependencies = [
 ]

 [[package]]
-name = "jsonrpc-http-server"
-version = "18.0.0"
+name = "jsonrpsee"
+version = "0.15.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e1dea6e07251d9ce6a552abfb5d7ad6bc290a4596c8dcc3d795fae2bbdc1f3ff"
+checksum = "8bd0d559d5e679b1ab2f869b486a11182923863b1b3ee8b421763cdd707b783a"
 dependencies = [
- "futures 0.3.16",
+ "jsonrpsee-core",
+ "jsonrpsee-http-server",
+ "jsonrpsee-types",
+]
+
+[[package]]
+name = "jsonrpsee-core"
+version = "0.15.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f3dc3e9cf2ba50b7b1d7d76a667619f82846caa39e8e8daa8a4962d74acaddca"
+dependencies = [
+ "anyhow",
+ "arrayvec 0.7.2",
+ "async-trait",
+ "beef",
+ "futures-channel",
+ "futures-util",
+ "globset",
+ "http",
  "hyper",
- "jsonrpc-core",
- "jsonrpc-server-utils",
- "log",
- "net2",
- "parking_lot 0.11.2",
+ "jsonrpsee-types",
+ "lazy_static",
+ "parking_lot 0.12.1",
+ "rand",
+ "rustc-hash",
+ "serde",
+ "serde_json",
+ "thiserror",
+ "tokio",
+ "tracing",
  "unicase",
 ]

 [[package]]
-name = "jsonrpc-server-utils"
-version = "18.0.0"
+name = "jsonrpsee-http-server"
+version = "0.15.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "03802f0373a38c2420c70b5144742d800b509e2937edc4afb116434f07120117"
+dependencies = [
+ "futures-channel",
+ "futures-util",
+ "hyper",
+ "jsonrpsee-core",
+ "jsonrpsee-types",
+ "serde",
+ "serde_json",
+ "tokio",
+ "tracing",
+ "tracing-futures",
+]
+
+[[package]]
+name = "jsonrpsee-types"
+version = "0.15.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fa4fdea130485b572c39a460d50888beb00afb3e35de23ccd7fad8ff19f0e0d4"
+checksum = "e290bba767401b646812f608c099b922d8142603c9e73a50fb192d3ac86f4a0d"
 dependencies = [
- "bytes",
- "futures 0.3.16",
- "globset",
- "jsonrpc-core",
- "lazy_static",
- "log",
- "tokio",
- "tokio-stream",
- "tokio-util 0.6.7",
- "unicase",
+ "anyhow",
+ "beef",
+ "serde",
+ "serde_json",
+ "thiserror",
+ "tracing",
 ]

 [[package]]
@@ -2478,9 +2673,9 @@ checksum = "7ffc5c5338469d4d3ea17d269fa8ea3512ad247247c30bd2df69e68309ed0a08"
 [[package]]
 name = "matchit"
-version = "0.5.0"
+version = "0.7.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "73cbba799671b762df5a175adf59ce145165747bb891505c43d09aefbbf38beb"
+checksum = "b87248edafb776e59e6ee64a79086f65890d3510f2c656c000bf2a7e8a0aea40"

 [[package]]
 name = "maybe-owned"
@@ -2563,9 +2758,9 @@ dependencies = [
 [[package]]
 name = "miniz_oxide"
-version = "0.5.3"
+version = "0.6.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6f5c75688da582b8ffc1f1799e9db273f32133c49e048f614d22ec3256773ccc"
+checksum = "b275950c28b37e794e8c55d88aeb5e139d0ce23fdbbeda68f8d7174abdf9e8fa"
 dependencies = [
  "adler",
 ]
@@ -2598,6 +2793,24 @@
 version = "0.2.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "0debeb9fcf88823ea64d64e4a815ab1643f33127d995978e099942ce38f25238"

+[[package]]
+name = "multiaddr"
+version = "0.17.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3b53e0cc5907a5c216ba6584bf74be8ab47d6d6289f72793b2dddbf15dc3bf8c"
+dependencies = [
+ "arrayref",
+ "byteorder",
+ "data-encoding",
+ "multibase",
+ "multihash 0.17.0",
+ "percent-encoding",
+ "serde",
+ "static_assertions",
+ "unsigned-varint",
+ "url",
+]
+
 [[package]]
 name = "multibase"
 version = "0.9.1"
@@ -2611,17 +2824,28 @@ dependencies = [
 [[package]]
 name = "multihash"
-version = "0.16.2"
+version = "0.17.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "835d6ff01d610179fbce3de1694d007e500bf33a7f29689838941d6bf783ae40"
+dependencies = [
+ "core2",
+ "multihash-derive",
+ "unsigned-varint",
+]
+
+[[package]]
+name = "multihash"
+version = "0.18.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e3db354f401db558759dfc1e568d010a5d4146f4d3f637be1275ec4a3cf09689"
+checksum = "15e5d911412e631e1de11eb313e4dd71f73fd964401102aab23d6c8327c431ba"
 dependencies = [
  "blake2b_simd",
  "blake2s_simd",
- "blake3 1.3.1",
+ "blake3 1.3.3",
  "core2",
- "digest 0.10.3",
+ "digest 0.10.5",
  "multihash-derive",
- "sha2 0.10.5",
+ "sha2 0.10.6",
  "sha3",
  "unsigned-varint",
 ]
@@ -2664,17 +2888,6 @@ dependencies = [
  "tempfile",
 ]

-[[package]]
-name = "net2"
-version = "0.2.37"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "391630d12b68002ae1e25e8f974306474966550ad82dac6886fb8910c19568ae"
-dependencies = [
- "cfg-if 0.1.10",
- "libc",
- "winapi",
-]
-
 [[package]]
 name = "never"
 version = "0.1.0"
@@ -2687,6 +2900,15 @@
 version = "0.4.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "b93853da6d84c2e3c7d730d6473e8817692dd89be387eb01b94d7f108ecb5b8c"

+[[package]]
+name = "nom8"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ae01545c9c7fc4486ab7debaf2aad7003ac19431791868fb2e8066df97fad2f8"
+dependencies = [
+ "memchr",
+]
+
 [[package]]
 name = "ntapi"
 version = "0.3.6"
@@ -2729,11 +2951,11 @@ dependencies = [
 [[package]]
 name = "num_cpus"
-version = "1.13.1"
+version = "1.15.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "19e64526ebdee182341572e50e9ad03965aa510cd94427a4549448f285e957a1"
+checksum = "0fac9e2da13b5eb447a6ce3d392f23a29d8694bff781bf03a16cd9ac8697593b"
 dependencies = [
- "hermit-abi",
+ "hermit-abi 0.2.6",
  "libc",
 ]
@@ -2770,9 +2992,9 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5"
 [[package]]
 name = "openssl"
-version = "0.10.41"
+version = "0.10.45"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "618febf65336490dfcf20b73f885f5651a0c89c64c2d4a8c3662585a70bf5bd0"
+checksum = "b102428fd03bc5edf97f62620f7298614c45cedf287c271e7ed450bbaf83f2e1"
 dependencies = [
  "bitflags",
  "cfg-if 1.0.0",
@@ -2802,9 +3024,9 @@ checksum = "28988d872ab76095a6e6ac88d99b54fd267702734fd7ffe610ca27f533ddb95a"
 [[package]]
 name = "openssl-sys"
-version = "0.9.75"
+version = "0.9.80"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e5f9bd0c2710541a3cda73d6f9ac4f1b240de4ae261065d309dbe73d9dceb42f"
+checksum = "23bbbf7854cd45b83958ebe919f0e8e516793727652e27fda10a8384cfc790b7"
 dependencies = [
  "autocfg",
  "cc",
@@ -2916,9 +3138,9 @@ checksum = "acbf547ad0c65e31259204bd90935776d1c693cec2f4ff7abb7a1bbbd40dfe58"
 [[package]]
 name = "percent-encoding"
-version = "2.1.0"
+version = "2.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e"
+checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e"

 [[package]]
 name = "petgraph"
@@ -2950,18 +3172,18 @@ dependencies = [
 [[package]]
 name = "pin-project"
-version = "1.0.8"
+version = "1.0.12"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "576bc800220cc65dac09e99e97b08b358cfab6e17078de8dc5fee223bd2d0c08"
+checksum = "ad29a609b6bcd67fee905812e544992d216af9d755757c05ed2d0e15a74c6ecc"
 dependencies = [
  "pin-project-internal",
 ]

 [[package]]
 name = "pin-project-internal"
-version = "1.0.8"
+version = "1.0.12"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6e8fe8163d14ce7f0cdac2e040116f22eac817edabff0be91e8aff7e9accf389"
+checksum = "069bdb1e05adc7a8990dce9cc75370895fbe4e3d58b9b73bf1aee56359344a55"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -2986,12 +3208,6 @@
 version = "0.3.19"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "3831453b3449ceb48b6d9c7ad7c96d5ea673e9b470a1dc578c2ce6521230884c"

-[[package]]
-name = "port_check"
-version = "0.1.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f6519412c9e0d4be579b9f0618364d19cb434b324fc6ddb1b27b1e682c7105ed"
-
 [[package]]
 name = "postgres"
 version = "0.19.1"
@@ -3025,7 +3241,7 @@
 version = "0.6.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "ff3e0f70d32e20923cabf2df02913be7c1842d4c772db8065c00fcfdd1d1bff3"
 dependencies = [
- "base64",
+ "base64 0.13.1",
  "byteorder",
  "bytes",
  "fallible-iterator",
@@ -3065,14 +3281,14 @@ dependencies = [
 [[package]]
 name = "pretty_assertions"
-version = "1.2.1"
+version = "1.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c89f989ac94207d048d92db058e4f6ec7342b0971fc58d1271ca148b799b3563"
+checksum = "a25e9bcb20aa780fd0bb16b72403a9064d6b3f22f026946029acb941a50af755"
 dependencies = [
- "ansi_term",
  "ctor",
  "diff",
  "output_vt100",
+ "yansi",
 ]

 [[package]]
@@ -3115,7 +3331,7 @@
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "1ebace6889caf889b4d3f76becee12e90353f2b8c7d875534a71e5742f8f6f83"
 dependencies = [
  "thiserror",
- "toml",
+ "toml 0.5.11",
 ]
@@ -3156,18 +3372,18 @@ checksum = "bc881b2c22681370c6a780e47af9840ef841837bc98118431d4e1868bd0c1086"
 [[package]]
 name = "proc-macro2"
-version = "1.0.43"
+version = "1.0.51"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0a2ca2c61bc9f3d74d2886294ab7b9853abd9c1ad903a3ac7815c58989bb7bab"
+checksum = "5d727cae5b39d21da60fa540906919ad737832fe0b1c165da3a34d6548c849d6"
 dependencies = [
  "unicode-ident",
 ]

 [[package]]
 name = "prometheus"
-version = "0.13.1"
+version = "0.13.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cface98dfa6d645ea4c789839f176e4b072265d085bfcc48eaa8d137f58d3c39"
+checksum = "449811d15fbdf5ceb5c1144416066429cf82316e2ec8ce0c1f6f8a02e7bbcf8c"
 dependencies = [
  "cfg-if 1.0.0",
  "fnv",
@@ -3182,9 +3398,9 @@ dependencies = [
 [[package]]
 name = "prost"
-version = "0.10.4"
+version = "0.11.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "71adf41db68aa0daaefc69bb30bcd68ded9b9abaad5d1fbb6304c4fb390e083e"
+checksum = "21dc42e00223fc37204bd4aa177e69420c604ca4a183209a8f9de30c6d934698"
 dependencies = [
  "bytes",
  "prost-derive",
 ]

 [[package]]
 name = "prost-build"
-version = "0.10.1"
+version = "0.11.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "120fbe7988713f39d780a58cf1a7ef0d7ef66c6d87e5aa3438940c05357929f4"
+checksum = "cb5320c680de74ba083512704acb90fe00f28f79207286a848e730c45dd73ed6"
 dependencies = [
  "bytes",
- "cfg-if 1.0.0",
- "cmake",
- "heck 0.4.0",
+ "heck 0.4.1",
  "itertools",
  "lazy_static",
  "log",
  "multimap",
  "petgraph",
+ "prettyplease",
  "prost",
  "prost-types",
  "regex",
+ "syn",
  "tempfile",
  "which",
 ]

 [[package]]
 name = "prost-derive"
-version = "0.10.1"
+version = "0.11.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7b670f45da57fb8542ebdbb6105a925fe571b67f9e7ed9f47a06a84e72b4e7cc"
+checksum = "8bda8c0881ea9f722eb9629376db3d0b903b462477c1aafcb0566610ac28ac5d"
 dependencies = [
  "anyhow",
  "itertools",
@@ -3227,9 +3443,9 @@ dependencies = [
 [[package]]
 name = "prost-types"
-version = "0.10.1"
+version = "0.11.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2d0a014229361011dc8e69c8a1ec6c2e8d0f2af7c91e3ea3f5b2170298461e68"
+checksum = "a5e0526209433e96d83d750dd81a99118edbc55739e7e61a46764fd2ad537788"
 dependencies = [
  "bytes",
  "prost",
@@ -3243,9 +3459,9 @@ checksum = "020f86b07722c5c4291f7c723eac4676b3892d47d9a7708dc2779696407f039b"
 [[package]]
 name = "protobuf"
-version = "3.1.0"
+version = "3.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4ee4a7d8b91800c8f167a6268d1a1026607368e1adc84e98fe044aeb905302f7"
+checksum = "b55bad9126f378a853655831eb7363b7b01b81d19f8cb1218861086ca4a1a61e"
 dependencies = [
  "once_cell",
  "protobuf-support",
@@ -3254,14 +3470,14 @@ dependencies = [
 [[package]]
 name = "protobuf-parse"
-version = "3.1.0"
+version = "3.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9b1447dd751c434cc1b415579837ebd0411ed7d67d465f38010da5d7cd33af4d"
+checksum = "9d39b14605eaa1f6a340aec7f320b34064feb26c93aec35d6a9a2272a8ddfa49"
 dependencies = [
  "anyhow",
  "indexmap",
  "log",
- "protobuf 3.1.0",
+ "protobuf 3.2.0",
  "protobuf-support",
  "tempfile",
  "thiserror",
@@ -3270,9 +3486,9 @@
 [[package]]
 name = "protobuf-support"
-version = "3.1.0"
+version = "3.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8ca157fe12fc7ee2e315f2f735e27df41b3d97cdd70ea112824dac1ffb08ee1c"
+checksum = "a5d4d7b8601c814cfb36bcebb79f0e61e45e1e93640cf778837833bbed05c372"
 dependencies = [
  "thiserror",
 ]
@@ -3294,9 +3510,9 @@ checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0"
 [[package]]
 name = "quote"
-version = "1.0.20"
+version = "1.0.23"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3bcdf212e9776fbcb2d23ab029360416bb1706b1aea2d1a5ba002727cbcab804"
+checksum = "8856d8364d252a14d474036ea1358d63c9e6965c8e5c1885c18f73d70bff9c7b"
 dependencies = [
  "proc-macro2",
 ]
@@ -3454,7 +3670,7 @@
 version = "0.11.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "246e9f61b9bb77df069a947682be06e31ac43ea37862e244a69f177694ea6d22"
 dependencies = [
- "base64",
+ "base64 0.13.1",
  "bytes",
  "encoding_rs",
  "futures-core",
@@ -3555,34 +3771,25 @@
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "0167bac7a9f490495f3c33013e7722b53cb087ecbe082fb0c6387c96f634ea50"
 dependencies = [
  "openssl-probe",
- "rustls-pemfile 1.0.0",
+ "rustls-pemfile",
  "schannel",
  "security-framework",
 ]

-[[package]]
-name = "rustls-pemfile"
-version = "0.3.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1ee86d63972a7c661d1536fefe8c3c8407321c3df668891286de28abcd087360"
-dependencies = [
- "base64",
-]
-
 [[package]]
 name = "rustls-pemfile"
 version = "1.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "e7522c9de787ff061458fe9a829dc790a3f5b22dc571694fc5883f448b94d9a9"
 dependencies = [
- "base64",
+ "base64 0.13.1",
 ]

 [[package]]
 name = "rustversion"
-version = "1.0.5"
+version = "1.0.11"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "61b3909d758bb75c79f23d4736fac9433868679d3ad2ea7a61e3c25cfda9a088"
+checksum = "5583e89e108996506031660fe09baa5011b9dd0341b89029313006d1fb508d70"

 [[package]]
 name = "ryu"
@@ -3590,6 +3797,15 @@
 version = "1.0.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "71d301d4193d031abdd79ff7e3dd721168a9572ef3fe51a1517aba235bd8f86e"

+[[package]]
+name = "same-file"
+version = "1.0.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502"
+dependencies = [
+ "winapi-util",
+]
+
 [[package]]
 name = "schannel"
 version = "0.1.19"
@@ -3698,27 +3914,27 @@ dependencies = [
 [[package]]
 name = "semver"
-version = "1.0.12"
+version = "1.0.16"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a2333e6df6d6598f2b1974829f853c2b4c5f4a6e503c10af918081aa6f8564e1"
+checksum = "58bc9567378fc7690d6b2addae4e60ac2eeea07becb2c64b9f218b53865cba2a"
 dependencies = [
  "serde",
 ]

 [[package]]
 name = "serde"
-version = "1.0.127"
+version = "1.0.152"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f03b9878abf6d14e6779d3f24f07b2cfa90352cfec4acc5aab8f1ac7f146fae8"
+checksum = "bb7d1f0d3021d347a83e556fc4683dea2ea09d87bccdf88ff5c12545d89d5efb"
 dependencies = [
  "serde_derive",
 ]

 [[package]]
 name = "serde_derive"
-version = "1.0.127"
+version = "1.0.152"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a024926d3432516606328597e0f224a51355a493b49fdd67e9209187cbe55ecc"
+checksum = "af487d118eecd09402d70a5d72551860e788df87b464af30e5ea6a38c75c541e"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -3738,9 +3954,9 @@ dependencies = [
 [[package]]
 name = "serde_plain"
-version = "1.0.0"
+version = "1.0.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "95455e7e29fada2052e72170af226fbe368a4ca33dee847875325d9fdb133858"
+checksum = "d6018081315db179d0ce57b1fe4b62a12a0028c9cf9bbef868c9cf477b3c34ae"
 dependencies = [
  "serde",
 ]
@@ -3755,6 +3971,15 @@ dependencies = [
  "serde",
 ]

+[[package]]
+name = "serde_spanned"
+version = "0.6.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0efd8caf556a6cebd3b285caf480045fcc1ac04f6bd786b09a6f11af30c4fcf4"
+dependencies = [
+ "serde",
+]
+
 [[package]]
 name = "serde_urlencoded"
 version = "0.7.0"
@@ -3815,6 +4040,17 @@ dependencies = [
  "opaque-debug",
 ]

+[[package]]
+name = "sha-1"
+version = "0.10.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "028f48d513f9678cda28f6e4064755b3fbb2af6acd672f2c209b62323f7aea0f"
+dependencies = [
+ "cfg-if 1.0.0",
+ "cpufeatures 0.2.2",
+ "digest 0.10.5",
+]
+
 [[package]]
 name = "sha2"
 version = "0.9.5"
@@ -3830,13 +4066,13 @@ dependencies = [
 [[package]]
 name = "sha2"
-version = "0.10.5"
+version = "0.10.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cf9db03534dff993187064c4e0c05a5708d2a9728ace9a8959b77bedf415dac5"
+checksum = "82e6b795fe2e3b1e845bafcb27aa35405c4d47cdfc92af5fc8d3002f76cebdc0"
 dependencies = [
  "cfg-if 1.0.0",
  "cpufeatures 0.2.2",
- "digest 0.10.3",
+ "digest 0.10.5",
 ]

 [[package]]
@@ -3845,7 +4081,7 @@
 version = "0.10.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "881bf8156c87b6301fc5ca6b27f11eeb2761224c7081e69b409d5a1951a70c86"
 dependencies = [
- "digest 0.10.3",
+ "digest 0.10.5",
  "keccak",
 ]
@@ -3969,13 +4205,13 @@
 version = "0.7.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "41d1c5305e39e09653383c2c7244f2f78b3bcae37cf50c64cb4789c9f5096ec2"
 dependencies = [
- "base64",
+ "base64 0.13.1",
  "bytes",
  "futures 0.3.16",
  "httparse",
  "log",
  "rand",
- "sha-1",
+ "sha-1 0.9.7",
 ]

 [[package]]
@@ -4035,42 +4271,12 @@ dependencies = [
  "unicode-normalization",
 ]

-[[package]]
-name = "strsim"
-version = "0.8.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a"
-
 [[package]]
 name = "strsim"
 version = "0.10.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623"

-[[package]]
-name = "structopt"
-version = "0.3.26"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0c6b5c64445ba8094a6ab0c3cd2ad323e07171012d9c98b0b15651daf1787a10"
-dependencies = [
- "clap 2.34.0",
- "lazy_static",
- "structopt-derive",
-]
-
-[[package]]
-name = "structopt-derive"
-version = "0.4.18"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dcb5ae327f9cc13b68763b5749770cb9e048a99bd9dfdfa58d0cf05d5f64afe0"
-dependencies = [
- "heck 0.3.3",
- "proc-macro-error",
- "proc-macro2",
- "quote",
- "syn",
-]
-
 [[package]]
 name = "strum"
 version = "0.21.0"
@@ -4097,9 +4303,9 @@ checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601"
 [[package]]
 name = "syn"
-version = "1.0.98"
+version = "1.0.107"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c50aef8a904de4c23c788f104b7dddc7d6f79c647c7c8ce4cc8f73eb0ca773dd"
+checksum = "1f4064b5b16e03ae50984a5a8ed5d4f8803e6bc1fd170a3cda91a1be4b18e3f5"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -4167,21 +4373,11 @@ dependencies = [
  "winapi",
 ]

-[[package]]
-name = "term_size"
-version = "0.3.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1e4129646ca0ed8f45d09b929036bafad5377103edd06e50bf574b353d2b08d9"
-dependencies = [
- "libc",
- "winapi",
-]
-
 [[package]]
 name = "termcolor"
-version = "1.1.2"
+version = "1.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2dfed899f0eb03f32ee8c6a0aabdb8a7949659e3466561fc0adf54e26d88c5f4"
+checksum = "be55cf8942feac5c765c2c993422806843c9a9a45d4d5c407ad6dd2ea95eb9b6"
 dependencies = [
  "winapi-util",
 ]
@@ -4198,10 +4394,11 @@ dependencies = [
 [[package]]
 name = "test-store"
-version = "0.27.0"
+version = "0.29.0"
 dependencies = [
  "diesel",
  "graph",
+ "graph-chain-ethereum",
  "graph-graphql",
  "graph-mock",
  "graph-node",
@@ -4209,24 +4406,15 @@ dependencies = [
  "graphql-parser",
  "hex-literal",
  "lazy_static",
+ "prost-types",
  "serde",
 ]

 [[package]]
 name = "textwrap"
-version = "0.11.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060"
-dependencies = [
- "term_size",
- "unicode-width",
-]
-
-[[package]]
-name = "textwrap"
-version = "0.15.0"
+version = "0.16.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b1141d4d61095b28419e22cb0bbf02755f5e54e0526f97f1e3d1d160e60885fb"
+checksum = "222a222a5bfe1bba4a77b45ec488a741b3cb8872e5e499451fd7d0129c9c7c3d"

 [[package]]
 name = "thiserror"
@@ -4268,6 +4456,33 @@ dependencies = [
  "winapi",
 ]

+[[package]]
+name = "time"
+version = "0.3.17"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a561bf4617eebd33bca6434b988f39ed798e527f51a1e797d0ee4f61c0a38376"
+dependencies = [
+ "itoa 1.0.1",
+ "serde",
+ "time-core",
+ "time-macros",
+]
+
+[[package]]
+name = "time-core"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2e153e1f1acaef8acc537e68b44906d2db6436e2b35ac2c6b42640fff91f00fd"
+
+[[package]]
+name = "time-macros"
+version = "0.2.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d967f99f534ca7e495c575c62638eebc2898a8c84c119b89e250477bc4ba16b2"
+dependencies = [
+ "time-core",
+]
+
 [[package]]
 name = "tiny-keccak"
 version = "1.5.0"
@@ -4410,9 +4625,9 @@ dependencies = [
 [[package]]
 name = "tokio-stream"
-version = "0.1.9"
+version = "0.1.11"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "df54d54117d6fdc4e4fea40fe1e4e566b3505700e148a6827e59b34b0d2600d9"
+checksum = "d660770404473ccd7bc9f8b28494a811bc18542b915c0855c51e8f419d5223ce"
 dependencies = [
  "futures-core",
  "pin-project-lite",
@@ -4435,13 +4650,12 @@ dependencies = [
 [[package]]
 name = "tokio-tungstenite"
-version = "0.14.0"
+version = "0.17.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1e96bb520beab540ab664bd5a9cfeaa1fcd846fa68c830b42e2c8963071251d2"
+checksum = "f714dd15bead90401d77e04243611caec13726c2408afd5b31901dfcdcb3b181"
 dependencies = [
  "futures-util",
  "log",
- "pin-project",
  "tokio",
  "tungstenite",
 ]
@@ -4477,23 +4691,57 @@ dependencies = [
 [[package]]
 name = "toml"
-version = "0.5.8"
+version = "0.5.11"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a31142970826733df8241ef35dc040ef98c679ab14d7c3e54d827099b3acecaa"
+checksum = "f4f7f0dd8d50a853a531c426359045b1998f04219d88799810762cd4ad314234"
 dependencies = [
  "serde",
 ]

 [[package]]
-name = "tonic"
+name = "toml"
 version = "0.7.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "30fb54bf1e446f44d870d260d99957e7d11fb9d0a0f5bd1a662ad1411cc103f9"
+checksum = "772c1426ab886e7362aedf4abc9c0d1348a979517efedfc25862944d10137af0"
+dependencies = [
+ "serde",
+ "serde_spanned",
+ "toml_datetime",
+ "toml_edit",
+]
+
+[[package]]
+name = "toml_datetime"
+version = "0.6.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3ab8ed2edee10b50132aed5f331333428b011c99402b5a534154ed15746f9622"
+dependencies = [
+ "serde",
+]
+
+[[package]]
+name = "toml_edit"
+version = "0.19.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "90a238ee2e6ede22fb95350acc78e21dc40da00bb66c0334bde83de4ed89424e"
+dependencies = [
+ "indexmap",
+ "nom8",
+ "serde",
+ "serde_spanned",
+ "toml_datetime",
+]
+
+[[package]]
+name = "tonic"
+version = "0.8.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8f219fad3b929bef19b1f86fbc0358d35daed8f2cac972037ac0dc10bbb8d5fb"
 dependencies = [
  "async-stream",
  "async-trait",
  "axum",
- "base64",
+ "base64 0.13.1",
  "bytes",
  "flate2",
  "futures-core",
@@ -4508,13 +4756,13 @@ dependencies = [
  "prost",
  "prost-derive",
  "rustls-native-certs",
- "rustls-pemfile 0.3.0",
+ "rustls-pemfile",
  "tokio",
  "tokio-rustls",
  "tokio-stream",
  "tokio-util 0.7.1",
- "tower 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)",
- "tower-layer 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tower 0.4.13",
+ "tower-layer 0.3.2",
  "tower-service 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "tracing",
  "tracing-futures",
@@ -4522,9 +4770,9 @@ dependencies = [
 [[package]]
 name = "tonic-build"
-version = "0.7.2"
+version = "0.8.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d9263bf4c9bfaae7317c1c2faf7f18491d2fe476f70c414b73bf5d445b00ffa1"
+checksum = "5bf5e9b9c0f7e0a7c027dcfaba7b2c60816c7049171f679d99ee2ff65d0de8c4"
 dependencies = [
  "prettyplease",
  "proc-macro2",
@@ -4536,35 +4784,39 @@ dependencies = [
 [[package]]
 name = "tower"
 version = "0.4.12"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9a89fd63ad6adf737582df5db40d286574513c69a11dac5214dc3b5603d6713e"
+source = "git+https://github.com/tower-rs/tower.git#74881d531141ba0f07b7f58e2a72e3594e5a665c"
 dependencies = [
  "futures-core",
  "futures-util",
+ "hdrhistogram",
  "indexmap",
- "pin-project",
  "pin-project-lite",
- "rand",
  "slab",
+ "sync_wrapper",
  "tokio",
  "tokio-util 0.7.1",
- "tower-layer 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "tower-service 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tower-layer 0.3.1",
+ "tower-service 0.3.1 (git+https://github.com/tower-rs/tower.git)",
  "tracing",
 ]

 [[package]]
 name = "tower"
-version = "0.4.12"
-source = "git+https://github.com/tower-rs/tower.git#ee826286fd1f994eabf14229e1e579ae29237386"
+version = "0.4.13"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c"
 dependencies = [
  "futures-core",
  "futures-util",
+ "indexmap",
+ "pin-project",
  "pin-project-lite",
+ "rand",
+ "slab",
  "tokio",
  "tokio-util 0.7.1",
- "tower-layer 0.3.1 (git+https://github.com/tower-rs/tower.git)",
- "tower-service 0.3.1 (git+https://github.com/tower-rs/tower.git)",
+ "tower-layer 0.3.2",
+ "tower-service 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "tracing",
 ]
@@ -4582,21 +4834,21 @@ dependencies = [
  "http-body",
  "http-range-header",
  "pin-project-lite",
- "tower 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)",
- "tower-layer 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tower 0.4.13",
+ "tower-layer 0.3.2",
  "tower-service 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
 ]

 [[package]]
 name = "tower-layer"
 version = "0.3.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "343bc9466d3fe6b0f960ef45960509f84480bf4fd96f92901afe7ff3df9d3a62"
+source =
"git+https://github.com/tower-rs/tower.git#74881d531141ba0f07b7f58e2a72e3594e5a665c" [[package]] name = "tower-layer" -version = "0.3.1" -source = "git+https://github.com/tower-rs/tower.git#ee826286fd1f994eabf14229e1e579ae29237386" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c20c8dbed6283a09604c3e69b4b7eeb54e298b8a600d4d5ecb5ad39de609f1d0" [[package]] name = "tower-service" @@ -4607,18 +4859,18 @@ checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6" [[package]] name = "tower-service" version = "0.3.1" -source = "git+https://github.com/tower-rs/tower.git#ee826286fd1f994eabf14229e1e579ae29237386" +source = "git+https://github.com/tower-rs/tower.git#74881d531141ba0f07b7f58e2a72e3594e5a665c" [[package]] name = "tower-test" version = "0.4.0" -source = "git+https://github.com/tower-rs/tower.git#ee826286fd1f994eabf14229e1e579ae29237386" +source = "git+https://github.com/tower-rs/tower.git#74881d531141ba0f07b7f58e2a72e3594e5a665c" dependencies = [ "futures-util", "pin-project-lite", "tokio", "tokio-test", - "tower-layer 0.3.1 (git+https://github.com/tower-rs/tower.git)", + "tower-layer 0.3.1", "tower-service 0.3.1 (git+https://github.com/tower-rs/tower.git)", ] @@ -4673,24 +4925,34 @@ checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" [[package]] name = "tungstenite" -version = "0.13.0" +version = "0.17.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fe8dada8c1a3aeca77d6b51a4f1314e0f4b8e438b7b1b71e3ddaca8080e4093" +checksum = "e27992fd6a8c29ee7eef28fc78349aa244134e10ad447ce3b9f0ac0ed0fa4ce0" dependencies = [ - "base64", + "base64 0.13.1", "byteorder", "bytes", "http", "httparse", - "input_buffer", "log", "rand", - "sha-1", + "sha-1 0.10.0", "thiserror", "url", "utf-8", ] +[[package]] +name = "typed-builder" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89851716b67b937e393b3daa8423e67ddfc4bbbf1654bcf05488e95e0828db0c" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "typenum" version = "1.15.0" @@ -4783,13 +5045,12 @@ checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" [[package]] name = "url" -version = "2.2.2" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a507c383b2d33b5fc35d1861e77e6b383d158b2da5e14fe51b83dfedf6fd578c" +checksum = "0d68c799ae75762b8c3fe375feb6600ef5602c883c5d21eb51c09f22b83c4643" dependencies = [ "form_urlencoded", - "idna", - "matches", + "idna 0.3.0", "percent-encoding", ] @@ -4801,18 +5062,9 @@ checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" [[package]] name = "uuid" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" -dependencies = [ - "getrandom", -] - -[[package]] -name = "uuid" -version = "1.1.2" +version = "1.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd6469f4314d5f1ffec476e05f17cc9a78bc7a27a6a857842170bdf8d6f98d2f" +checksum = "422ee0de9031b5b948b97a8fc04e3aa35230001a722ddd27943e0be31564ce4c" dependencies = [ "getrandom", ] @@ -4823,12 +5075,6 @@ version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" -[[package]] -name = "vec_map" -version = "0.8.2" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" - [[package]] name = "version_check" version = "0.9.3" @@ -4841,6 +5087,17 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" +[[package]] +name = "walkdir" +version = "2.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "808cf2735cd4b6866113f648b791c6adc5714537bc222d9347bb203386ffda56" +dependencies = [ + "same-file", + "winapi", + "winapi-util", +] + [[package]] name = "want" version = "0.3.0" @@ -4980,7 +5237,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d14d500d5c3dc5f5c097158feee123d64b3097f0d836a2a27dff9c761c73c843" dependencies = [ "anyhow", - "base64", + "base64 0.13.1", "bincode", "directories-next", "errno", @@ -4989,7 +5246,7 @@ dependencies = [ "log", "serde", "sha2 0.9.5", - "toml", + "toml 0.5.11", "winapi", "zstd", ] @@ -5180,7 +5437,7 @@ version = "0.19.0-graph" source = "git+https://github.com/graphprotocol/rust-web3?branch=graph-patches-onto-0.18#7f8eb6dfcc13a4186f9b42f91de950646bc4a833" dependencies = [ "arrayvec 0.7.2", - "base64", + "base64 0.13.1", "bytes", "derive_more", "ethabi", @@ -5189,7 +5446,7 @@ dependencies = [ "futures-timer", "headers", "hex", - "idna", + "idna 0.2.3", "jsonrpc-core", "log", "once_cell", @@ -5349,6 +5606,12 @@ dependencies = [ "linked-hash-map", ] +[[package]] +name = "yansi" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec" + [[package]] name = "zstd" version = "0.6.1+zstd.1.4.9" diff --git a/Cargo.toml b/Cargo.toml index 3bb9c26a3ae..74cafca8ba8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -5,19 +5,28 @@ members = [ "graphql", "mock", "node", - "runtime/wasm", - "runtime/derive", - "runtime/test", - "server/http", - "server/json-rpc", - "server/index-node", - "server/metrics", - "store/postgres", - "store/test-store", + "runtime/*", + "server/*", + "store/*", "graph", "tests", ] +[workspace.package] +version = "0.29.0" +edition = "2021" +authors = ["The Graph core developers & contributors"] +readme = "README.md" +homepage = "https://thegraph.com" +repository = "https://github.com/graphprotocol/graph-node" +license = "MIT OR Apache-2.0" + +[workspace.dependencies] +prost = "0.11.6" +prost-types = "0.11.6" +tonic = { version = "0.8.3", features = ["tls-roots", "gzip"] } +tonic-build = { version = "0.8.4", features = ["prost"] } + # Incremental compilation on Rust 1.58 causes an ICE on build. As soon as graph node builds again, these can be removed. [profile.test] incremental = false diff --git a/NEWS.md b/NEWS.md index 497c56de70c..26fb38b75d2 100644 --- a/NEWS.md +++ b/NEWS.md @@ -2,13 +2,236 @@ ## Unreleased -### New DB table for dynamic data sources +- Fields of type `Bytes` can now use less than and greater than filters [#4285](https://github.com/graphprotocol/graph-node/pull/4285) + +## v0.29.0 + +### Upgrade notes + +- This release includes a **determinism fix** that affect a very small number of subgraphs on the network (we counted 2): if a subgraph manifest had one data source with no contract address, listening to the same events or calls of another data source that has a specified address, then the handlers for those would be called twice. 
After the fix, this no longer happens: the handler is called just once per event, as it should be.
+
+  Affected subgraph deployments:
+
+  - `Qmccst5mbV5a6vT6VvJMLPKMAA1VRgT6NGbxkLL8eDRsE7`
+  - `Qmd9nZKCH8UZU1pBzk7G8ECJr3jX3a2vAf3vowuTwFvrQg`
+
+  Here's an example [manifest](https://ipfs.io/ipfs/Qmd9nZKCH8UZU1pBzk7G8ECJr3jX3a2vAf3vowuTwFvrQg): the data sources named `ERC721` and `CryptoKitties` both listen to the `Transfer(...)` event. In a block with only one occurrence of this event, `graph-node` would duplicate it and call `handleTransfer` twice. This is now fixed, and the handler is called only once per event/call that happened on chain.
+
+  If you're indexing one of the impacted subgraphs, first upgrade the `graph-node` version, then rewind the affected subgraphs to the smallest `startBlock` of their subgraph manifest. The `graphman rewind` CLI command can be used to achieve that.
+
+  See [#4055](https://github.com/graphprotocol/graph-node/pull/4055) for more information, and the toy sketch after these notes for an illustration of the duplicated triggers.
+
+* This release fixes another determinism bug that affects a handful of subgraphs. The bug affects all subgraphs which have an `apiVersion` **older than** 0.0.5 and use call handlers. While call handlers prior to 0.0.5 should be triggered by both failed and successful transactions, in some cases failed transactions would not trigger the handlers, which resulted in nondeterministic behavior. With this version of `graph-node`, call handlers with an `apiVersion` older than 0.0.5 will always be triggered by both successful and failed transactions. Behavior for `apiVersion` 0.0.5 onward is not affected.
+
+  The affected subgraphs are:
+
+  - `QmNY7gDNXHECV8SXoEY7hbfg4BX1aDMxTBDiFuG4huaSGA`
+  - `QmYzsCjrVwwXtdsNm3PZVNziLGmb9o513GUzkq5wwhgXDT`
+  - `QmccAwofKfT9t4XKieDqwZre1UUZxuHw5ynB35BHwHAJDT`
+  - `QmYUcrn9S1cuSZQGomLRyn8GbNHmX8viqxMykP8kKpghz6`
+  - `QmecPw1iYuu85rtdYL2J2W9qcr6p8ijich9P5GbEAmmbW5`
+  - `Qmaz1R8vcv9v3gUfksqiS9JUz7K9G8S5By3JYn8kTiiP5K`
+
+  If you're indexing one of the impacted subgraphs, first upgrade the `graph-node` version, then rewind the affected subgraphs to the smallest `startBlock` of their subgraph manifest. The `graphman rewind` CLI command can be used to achieve that.
+
+  See [#4149](https://github.com/graphprotocol/graph-node/pull/4149) for more information.
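+
+To make the first fix concrete, here is a toy model of the matching rule. This is a simplified illustration only, not `graph-node`'s actual trigger-matching code; the names mirror the `ERC721`/`CryptoKitties` example above.
+
+```rust
+// A data source matches an event when the event signature matches and, if the
+// source specifies a contract address, that address matches too. A source
+// without an address matches the event at any address.
+struct Event<'a> {
+    address: &'a str,
+    signature: &'a str,
+}
+
+struct DataSource<'a> {
+    name: &'a str,
+    address: Option<&'a str>,
+    event: &'a str,
+}
+
+fn matches(ds: &DataSource, ev: &Event) -> bool {
+    ds.event == ev.signature && ds.address.map_or(true, |a| a == ev.address)
+}
+
+fn main() {
+    let ev = Event { address: "0xabc", signature: "Transfer(...)" };
+    let sources = [
+        DataSource { name: "CryptoKitties", address: Some("0xabc"), event: "Transfer(...)" },
+        DataSource { name: "ERC721", address: None, event: "Transfer(...)" },
+    ];
+    // Correct behavior: one handler invocation per matching data source.
+    // The pre-fix bug handed the overlapping trigger to the address-less
+    // data source twice, so its `handleTransfer` ran twice for one event.
+    for ds in sources.iter().filter(|ds| matches(ds, &ev)) {
+        println!("{} -> handleTransfer (once)", ds.name);
+    }
+}
+```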
+
+### What's new
+
+* Grafted subgraphs can now add their own data sources. [#3989](https://github.com/graphprotocol/graph-node/pull/3989), [#4027](https://github.com/graphprotocol/graph-node/pull/4027), [#4030](https://github.com/graphprotocol/graph-node/pull/4030)
+* Add support for filtering by nested interfaces. [#3677](https://github.com/graphprotocol/graph-node/pull/3677)
+* Add support for message handlers in Cosmos. [#3975](https://github.com/graphprotocol/graph-node/pull/3975)
+* Dynamic data sources for Firehose-backed subgraphs. [#4075](https://github.com/graphprotocol/graph-node/pull/4075)
+* Various logging improvements. [#4078](https://github.com/graphprotocol/graph-node/pull/4078), [#4084](https://github.com/graphprotocol/graph-node/pull/4084), [#4031](https://github.com/graphprotocol/graph-node/pull/4031), [#4144](https://github.com/graphprotocol/graph-node/pull/4144), [#3990](https://github.com/graphprotocol/graph-node/pull/3990)
+* Some DB queries now have GCP Cloud Insight-compliant tags that show where the query originated from. [#4079](https://github.com/graphprotocol/graph-node/pull/4079)
+* New configuration variable `GRAPH_STATIC_FILTERS_THRESHOLD` to conditionally enable static filtering based on the number of dynamic data sources. [#4008](https://github.com/graphprotocol/graph-node/pull/4008)
+* New configuration variable `GRAPH_STORE_BATCH_TARGET_DURATION`. [#4133](https://github.com/graphprotocol/graph-node/pull/4133)
+
+#### Docker image
+* The official Docker image now runs on Debian 11 "Bullseye". [#4081](https://github.com/graphprotocol/graph-node/pull/4081)
+* We now ship [`envsubst`](https://github.com/a8m/envsubst) with the official Docker image, allowing you to easily run templating logic on your configuration files. [#3974](https://github.com/graphprotocol/graph-node/pull/3974)
+
+#### Graphman
+
+We have a new documentation page for `graphman`, check it out [here](https://github.com/graphprotocol/graph-node/blob/2da697b1af17b1c947679d1b1a124628146545a6/docs/graphman.md)!
+
+* Subgraph pruning with `graphman`! [#3898](https://github.com/graphprotocol/graph-node/pull/3898), [#4125](https://github.com/graphprotocol/graph-node/pull/4125), [#4153](https://github.com/graphprotocol/graph-node/pull/4153), [#4152](https://github.com/graphprotocol/graph-node/pull/4152), [#4156](https://github.com/graphprotocol/graph-node/pull/4156), [#4041](https://github.com/graphprotocol/graph-node/pull/4041)
+* New command `graphman drop` to hastily delete a subgraph deployment. [#4035](https://github.com/graphprotocol/graph-node/pull/4035)
+* New command `graphman chain call-cache` for clearing the call cache for a given chain. [#4066](https://github.com/graphprotocol/graph-node/pull/4066)
+* Added a `--delete-duplicates` flag to `graphman check-blocks` (by @tilacog). [#3988](https://github.com/graphprotocol/graph-node/pull/3988)
+
+#### Performance
+* Restarting a node now takes much less time because `postgres_fdw` user mappings are only rebuilt upon schema changes. If necessary, you can also use the new commands `graphman database migrate` and `graphman database remap` to apply schema migrations or run remappings manually, respectively. [#4009](https://github.com/graphprotocol/graph-node/pull/4009), [#4076](https://github.com/graphprotocol/graph-node/pull/4076)
+* Database replicas now won't fall behind as much when copying subgraph data. [#3966](https://github.com/graphprotocol/graph-node/pull/3966), [#3986](https://github.com/graphprotocol/graph-node/pull/3986)
+* Block handler optimizations with Firehose >= 1.1.0. [#3971](https://github.com/graphprotocol/graph-node/pull/3971)
+* Reduced the amount of data that a non-primary shard has to mirror from the primary shard. [#4015](https://github.com/graphprotocol/graph-node/pull/4015)
+* We now use advisory locks to lock deployments' tables against concurrent writes. [#4010](https://github.com/graphprotocol/graph-node/pull/4010)
+
+#### Bug fixes
+* Fixed a bug that would cause some failed subgraphs to never restart. [#3959](https://github.com/graphprotocol/graph-node/pull/3959)
+* Fixed a bug that would cause bad POIs for Firehose-backed subgraphs when processing `CREATE` calls. [#4085](https://github.com/graphprotocol/graph-node/pull/4085)
+* Fixed a bug which would cause failure to redeploy a subgraph immediately after deletion. [#4044](https://github.com/graphprotocol/graph-node/pull/4044)
+* Firehose connections are now load-balanced; see the sketch after this list. [#4083](https://github.com/graphprotocol/graph-node/pull/4083)
+* Determinism fixes; see the upgrade notes above. [#4055](https://github.com/graphprotocol/graph-node/pull/4055), [#4149](https://github.com/graphprotocol/graph-node/pull/4149)
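+
+Relatedly, endpoint selection in this release returns a `Result` instead of an `Option`, which is why call sites throughout this diff can simply write `self.firehose_endpoints.random()?`. Below is a minimal sketch of that pattern, with assumed type definitions rather than `graph-node`'s actual ones.
+
+```rust
+use std::sync::Arc;
+
+use anyhow::{anyhow, Error};
+use rand::seq::SliceRandom;
+
+#[allow(dead_code)]
+struct FirehoseEndpoint {
+    uri: String,
+}
+
+struct FirehoseEndpoints(Vec<Arc<FirehoseEndpoint>>);
+
+impl FirehoseEndpoints {
+    // Pick one endpoint at random; erroring instead of returning `None`
+    // lets callers bubble the "no endpoint" case up with `?`.
+    fn random(&self) -> Result<Arc<FirehoseEndpoint>, Error> {
+        self.0
+            .choose(&mut rand::thread_rng())
+            .cloned()
+            .ok_or_else(|| anyhow!("no firehose endpoint available"))
+    }
+}
+```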
+
+#### Dependency updates
+
+| Dependency          | updated to |
+| ------------------- | ---------- |
+| `anyhow`            | 1.0.66     |
+| `base64`            | 0.13.1     |
+| `clap`              | 3.2.23     |
+| `env_logger`        | 0.9.1      |
+| `iana-time-zone`    | 0.1.47     |
+| `itertools`         | 0.10.5     |
+| `jsonrpsee`         | 0.15.1     |
+| `num_cpus`          | 1.14.0     |
+| `openssl`           | 0.10.42    |
+| `pretty_assertions` | 1.3.0      |
+| `proc-macro2`       | 1.0.47     |
+| `prometheus`        | 0.13.3     |
+| `protobuf-parse`    | 3.2.0      |
+| `semver`            | 1.0.14     |
+| `serde_plain`       | 1.0.1      |
+| `sha2`              | 0.10.6     |
+| `structopt`         | removed    |
+| `tokio-stream`      | 0.1.11     |
+| `tokio-tungstenite` | 0.17.2     |
+| `tower-test`        | `d27ba65`  |
+| `url`               | 2.3.1      |
+
+
+
+**Full Changelog**: https://github.com/graphprotocol/graph-node/compare/v0.28.2...v0.29.0
+
+## v0.28.2
+
+**Indexers are advised to migrate to `v0.28.2`** and entirely bypass `v0.28.0` and `v0.28.1`.
+
+Fixed a bug which would cause subgraphs to stop syncing under some `graph-node` deployment configurations. [#4046](https://github.com/graphprotocol/graph-node/pull/4046), [#4051](https://github.com/graphprotocol/graph-node/pull/4051)
+
+## v0.28.1
+
+Yanked. Please migrate to `v0.28.2`.
+
+## v0.28.0
+
+### Upgrade notes
+
+- **New DB table for dynamic data sources.**
+  For new subgraph deployments, dynamic data sources will be recorded under the `sgd*.data_sources$` table, rather than `subgraphs.dynamic_ethereum_contract_data_source`. As a consequence, new deployments will not work correctly on earlier graph node versions, so _downgrading to an earlier graph node version is not supported_.
+  See issue [#3405](https://github.com/graphprotocol/graph-node/issues/3405) for other details.
+
+### What's new
+
+- The file path which "too expensive queries" are sourced from is now configurable. You can use either the `GRAPH_NODE_EXPENSIVE_QUERIES_FILE` environment variable or the `expensive_queries_filename` option in the TOML configuration. [#3710](https://github.com/graphprotocol/graph-node/pull/3710)
+- The output you'll get from `graphman query` is less cluttered and overall nicer. The new options `--output` and `--trace` are available for detailed query information. [#3860](https://github.com/graphprotocol/graph-node/pull/3860)
+- `docker build` will now `--target` the production build stage by default. When you want to get the debug build, you now need `--target graph-node-debug`. [#3814](https://github.com/graphprotocol/graph-node/pull/3814)
+- Node IDs can now contain any character. The Docker start script still replaces hyphens with underscores for backwards compatibility reasons, but this behavior can be changed with the `GRAPH_NODE_ID_USE_LITERAL_VALUE` environment variable. With this new option, you can now seamlessly use the K8s-provided host names as node IDs, provided you reassign your deployments accordingly. [#3688](https://github.com/graphprotocol/graph-node/pull/3688)
+- You can now use the `conn_pool_size` option in TOML configuration files to configure the connection pool size for Firehose providers. [#3833](https://github.com/graphprotocol/graph-node/pull/3833)
+- Index nodes now have an endpoint to perform block number to canonical hash conversion, which will unblock further work towards multichain support. [#3942](https://github.com/graphprotocol/graph-node/pull/3942)
+- `_meta.block.timestamp` is now available for subgraphs indexing EVM chains. [#3738](https://github.com/graphprotocol/graph-node/pull/3738), [#3902](https://github.com/graphprotocol/graph-node/pull/3902)
+- The `deployment_eth_rpc_request_duration` metric now also observes `eth_getTransactionReceipt` requests' duration. [#3903](https://github.com/graphprotocol/graph-node/pull/3903)
+- New Prometheus metrics `query_parsing_time` and `query_validation_time` for monitoring query processing performance. [#3760](https://github.com/graphprotocol/graph-node/pull/3760)
+- New command `graphman config provider`, which shows what providers are available for new deployments on a given network and node. [#3816](https://github.com/graphprotocol/graph-node/pull/3816)
+  E.g. `$ graphman --node-id index_node_0 --config graph-node.toml config provider mainnet`
+- Experimental support for GraphQL API versioning has landed. [#3185](https://github.com/graphprotocol/graph-node/pull/3185)
+- Progress towards experimental support for off-chain data sources. [#3791](https://github.com/graphprotocol/graph-node/pull/3791)
+- Experimental integration for substreams. [#3777](https://github.com/graphprotocol/graph-node/pull/3777), [#3784](https://github.com/graphprotocol/graph-node/pull/3784), [#3897](https://github.com/graphprotocol/graph-node/pull/3897), [#3765](https://github.com/graphprotocol/graph-node/pull/3765), and others
-For new subgraph deployments, dynamic data sources will be recorded under the `sgd*.data_sources$`
-table, rather than `subgraphs.dynamic_ethereum_contract_data_source`. As a consequence
-new deployments will not work correctly on earlier graph node versions, so
-_downgrading to an earlier graph node version is not supported_.
-See issue #3405 for other details.
+### Bug fixes
+
+- `graphman stats` now complains instead of failing silently when incorrectly setting `account-like` optimizations. [#3918](https://github.com/graphprotocol/graph-node/pull/3918)
+- Fixed inconsistent logic in the provider selection when the `limit` TOML configuration option was set. [#3816](https://github.com/graphprotocol/graph-node/pull/3816)
+- Fixed issues that would arise from dynamic data sources' names clashing against template names. [#3851](https://github.com/graphprotocol/graph-node/pull/3851)
+- Dynamic data source triggers are now processed in insertion order. [#3851](https://github.com/graphprotocol/graph-node/pull/3851), [#3854](https://github.com/graphprotocol/graph-node/pull/3854)
+- When starting, the Docker image now replaces the `bash` process with the `graph-node` process (with a PID of 1). [#3803](https://github.com/graphprotocol/graph-node/pull/3803)
+- Refactored the subgraph store tests (by @evaporei). [#3662](https://github.com/graphprotocol/graph-node/pull/3662)
+- The `ethereum_chain_head_number` metric doesn't get out of sync anymore on chains that use Firehose. [#3771](https://github.com/graphprotocol/graph-node/pull/3771), [#3732](https://github.com/graphprotocol/graph-node/issues/3732)
+- Fixed a crash caused by bad block data from the provider. [#3944](https://github.com/graphprotocol/graph-node/pull/3944)
+- Fixed some minor Firehose connectivity issues via TCP keepalive, connection and request timeouts, and connection window size tweaks; a sketch of this kind of tuning follows this list. [#3822](https://github.com/graphprotocol/graph-node/pull/3822), [#3855](https://github.com/graphprotocol/graph-node/pull/3855), [#3877](https://github.com/graphprotocol/graph-node/pull/3877), [#3810](https://github.com/graphprotocol/graph-node/pull/3810), [#3818](https://github.com/graphprotocol/graph-node/pull/3818)
+- Copying private data sources' tables across shards now works as expected. [#3836](https://github.com/graphprotocol/graph-node/pull/3836)
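+
+To give a sense of what such tweaks look like in code, here is a hedged sketch against tonic's channel builder; the knob values and the endpoint URL are illustrative, not `graph-node`'s actual configuration.
+
+```rust
+use std::time::Duration;
+use tonic::transport::Endpoint;
+
+fn tuned_firehose_endpoint(uri: &'static str) -> Endpoint {
+    Endpoint::from_static(uri)
+        // TCP keepalive so long-lived block streams are not silently dropped.
+        .tcp_keepalive(Some(Duration::from_secs(15)))
+        // Fail fast when the endpoint is unreachable.
+        .connect_timeout(Duration::from_secs(10))
+        // Bound individual requests.
+        .timeout(Duration::from_secs(30))
+        // Larger HTTP/2 windows help sustained, high-throughput streams.
+        .initial_connection_window_size(Some(1 << 24))
+        .initial_stream_window_size(Some(1 << 24))
+}
+
+fn main() {
+    // Hypothetical endpoint URL, for illustration only.
+    let _endpoint = tuned_firehose_endpoint("https://firehose.example.com:443");
+}
+```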
+
+### Performance improvements
+
+- Firehose gRPC stream requests are now compressed with `gzip`, if the server supports it. [#3893](https://github.com/graphprotocol/graph-node/pull/3893)
+- Memory efficiency improvements within the entity cache. [#3594](https://github.com/graphprotocol/graph-node/pull/3594)
+- Identical queries now benefit from GraphQL validation caching, and responses are served faster. [#3759](https://github.com/graphprotocol/graph-node/pull/3759)
+
+### Other
+
+- Avoid leaking some sensitive information in logs. [#3812](https://github.com/graphprotocol/graph-node/pull/3812)
+
+### Dependency updates
+
+| Dependency        | PR(s) | Old version | Current version |
+| ----------------- | ----- | ----------- | --------------- |
+| `serde_yaml`      | [#3746](https://github.com/graphprotocol/graph-node/pull/3746) | `v0.8.24` | `v0.8.26` |
+| `web3`            | [#3806](https://github.com/graphprotocol/graph-node/pull/3806) | `2760dbd` | `7f8eb6d` |
+| `clap`            | [#3794](https://github.com/graphprotocol/graph-node/pull/3794), [#3848](https://github.com/graphprotocol/graph-node/pull/3848), [#3931](https://github.com/graphprotocol/graph-node/pull/3931) | `v3.2.8` | `3.2.21` |
+| `cid`             | [#3824](https://github.com/graphprotocol/graph-node/pull/3824) | `v0.8.5` | `v0.8.6` |
+| `anyhow`          | [#3826](https://github.com/graphprotocol/graph-node/pull/3826), [#3841](https://github.com/graphprotocol/graph-node/pull/3841), [#3865](https://github.com/graphprotocol/graph-node/pull/3865), [#3932](https://github.com/graphprotocol/graph-node/pull/3932) | `v1.0.57` | `1.0.65` |
+| `chrono`          | [#3827](https://github.com/graphprotocol/graph-node/pull/3827), [#3849](https://github.com/graphprotocol/graph-node/pull/3839), [#3868](https://github.com/graphprotocol/graph-node/pull/3868) | `v0.4.19` | `v0.4.22` |
+| `proc-macro2`     | [#3845](https://github.com/graphprotocol/graph-node/pull/3845) | `v1.0.40` | `1.0.43` |
+| `ethabi`          | [#3847](https://github.com/graphprotocol/graph-node/pull/3847) | `v17.1.0` | `v17.2.0` |
+| `once_cell`       | [#3870](https://github.com/graphprotocol/graph-node/pull/3870) | `v1.13.0` | `v1.13.1` |
+| `either`          | [#3869](https://github.com/graphprotocol/graph-node/pull/3869) | `v1.7.0` | `v1.8.0` |
+| `sha2`            | [#3904](https://github.com/graphprotocol/graph-node/pull/3904) | `v0.10.2` | `v0.10.5` |
+| `mockall`         | [#3776](https://github.com/graphprotocol/graph-node/pull/3776) | `v0.9.1` | removed |
+| `crossbeam`       | [#3772](https://github.com/graphprotocol/graph-node/pull/3772) | `v0.8.1` | `v0.8.2` |
+| `async-recursion` | [#3873](https://github.com/graphprotocol/graph-node/pull/3873) | none | `v1.0.0` |
+
+
 ## 0.27.0
diff --git a/chain/arweave/Cargo.toml b/chain/arweave/Cargo.toml
index 5d94dd946dc..c969e0a400a 100644
--- a/chain/arweave/Cargo.toml
+++ b/chain/arweave/Cargo.toml
@@ -1,18 +1,18 @@
 [package]
name = "graph-chain-arweave" -version = "0.27.0" -edition = "2021" +version.workspace = true +edition.workspace = true [build-dependencies] -tonic-build = { version = "0.7.1", features = ["prost"]} +tonic-build = { workspace = true } [dependencies] base64-url = "1.4.13" graph = { path = "../../graph" } -prost = "0.10.1" -prost-types = "0.10.1" +prost = { workspace = true } +prost-types = { workspace = true } serde = "1.0" -sha2 = "0.10.5" +sha2 = "0.10.6" graph-runtime-wasm = { path = "../../runtime/wasm" } graph-runtime-derive = { path = "../../runtime/derive" } diff --git a/chain/arweave/src/adapter.rs b/chain/arweave/src/adapter.rs index 70e50846927..fd2d962e31e 100644 --- a/chain/arweave/src/adapter.rs +++ b/chain/arweave/src/adapter.rs @@ -1,4 +1,3 @@ -use crate::capabilities::NodeCapabilities; use crate::{data_source::DataSource, Chain}; use graph::blockchain as bc; use graph::prelude::*; @@ -26,8 +25,8 @@ impl bc::TriggerFilter for TriggerFilter { transaction_filter.extend(ArweaveTransactionFilter::from_data_sources(data_sources)); } - fn node_capabilities(&self) -> NodeCapabilities { - NodeCapabilities {} + fn node_capabilities(&self) -> bc::EmptyNodeCapabilities { + bc::EmptyNodeCapabilities::default() } fn extend_with_template( @@ -72,9 +71,7 @@ impl ArweaveTransactionFilter { }) .map(|ds| match &ds.source.owner { Some(str) if MATCH_ALL_WILDCARD.eq(str) => MATCH_ALL_WILDCARD.as_bytes().to_owned(), - owner @ _ => { - base64_url::decode(&owner.clone().unwrap_or_default()).unwrap_or_default() - } + owner => base64_url::decode(&owner.clone().unwrap_or_default()).unwrap_or_default(), }) .collect(); @@ -86,11 +83,11 @@ impl ArweaveTransactionFilter { .into_iter() .partition::>, _>(|long| long.len() != MATCH_ALL_WILDCARD.len()); - let match_all = wildcard.len() != 0; + let match_all = !wildcard.is_empty(); let owners_sha: Vec> = owners_sha .into_iter() - .chain::>>(owners_pubkey.iter().map(|long| sha256(&long)).collect()) + .chain::>>(owners_pubkey.iter().map(|long| sha256(long)).collect()) .collect(); Self { @@ -158,8 +155,8 @@ mod test { fn transaction_filter_wildcard_matches_all() { let dss = vec![ new_datasource(None, 10), - new_datasource(Some(base64_url::encode(MATCH_ALL_WILDCARD.into())), 10), - new_datasource(Some(base64_url::encode("owner").into()), 10), + new_datasource(Some(base64_url::encode(MATCH_ALL_WILDCARD)), 10), + new_datasource(Some(base64_url::encode("owner")), 10), new_datasource(Some(ARWEAVE_PUBKEY_EXAMPLE.into()), 10), ]; diff --git a/chain/arweave/src/capabilities.rs b/chain/arweave/src/capabilities.rs deleted file mode 100644 index 27c7622aeb5..00000000000 --- a/chain/arweave/src/capabilities.rs +++ /dev/null @@ -1,37 +0,0 @@ -use graph::{anyhow::Error, impl_slog_value}; -use std::cmp::{Ordering, PartialOrd}; -use std::fmt; -use std::str::FromStr; - -use crate::data_source::DataSource; - -#[derive(Clone, Copy, Debug, PartialEq, Eq)] -pub struct NodeCapabilities {} - -impl PartialOrd for NodeCapabilities { - fn partial_cmp(&self, _other: &Self) -> Option { - None - } -} - -impl FromStr for NodeCapabilities { - type Err = Error; - - fn from_str(_s: &str) -> Result { - Ok(NodeCapabilities {}) - } -} - -impl fmt::Display for NodeCapabilities { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.write_str("arweave") - } -} - -impl_slog_value!(NodeCapabilities, "{}"); - -impl graph::blockchain::NodeCapabilities for NodeCapabilities { - fn from_data_sources(_data_sources: &[DataSource]) -> Self { - NodeCapabilities {} - } -} diff --git 
a/chain/arweave/src/chain.rs b/chain/arweave/src/chain.rs index 14e52c11bef..6e11c3029b5 100644 --- a/chain/arweave/src/chain.rs +++ b/chain/arweave/src/chain.rs @@ -1,10 +1,9 @@ -use graph::blockchain::{Block, BlockchainKind}; +use graph::blockchain::{Block, BlockchainKind, EmptyNodeCapabilities}; use graph::cheap_clone::CheapClone; use graph::data::subgraph::UnifiedMappingApiVersion; use graph::firehose::{FirehoseEndpoint, FirehoseEndpoints}; use graph::prelude::{MetricsRegistry, TryFutureExt}; use graph::{ - anyhow, blockchain::{ block_stream::{ BlockStreamEvent, BlockWithTriggers, FirehoseError, @@ -21,7 +20,6 @@ use prost::Message; use std::sync::Arc; use crate::adapter::TriggerFilter; -use crate::capabilities::NodeCapabilities; use crate::data_source::{DataSourceTemplate, UnresolvedDataSourceTemplate}; use crate::runtime::RuntimeAdapter; use crate::trigger::{self, ArweaveTrigger}; @@ -83,7 +81,7 @@ impl Blockchain for Chain { type TriggerFilter = crate::adapter::TriggerFilter; - type NodeCapabilities = crate::capabilities::NodeCapabilities; + type NodeCapabilities = EmptyNodeCapabilities; fn triggers_adapter( &self, @@ -95,6 +93,18 @@ impl Blockchain for Chain { Ok(Arc::new(adapter)) } + fn is_refetch_block_required(&self) -> bool { + false + } + + async fn refetch_firehose_block( + &self, + _logger: &Logger, + _cursor: FirehoseCursor, + ) -> Result { + unimplemented!("This chain does not support Dynamic Data Sources. is_refetch_block_required always returns false, this shouldn't be called.") + } + async fn new_firehose_block_stream( &self, deployment: DeploymentLocator, @@ -105,22 +115,20 @@ impl Blockchain for Chain { unified_api_version: UnifiedMappingApiVersion, ) -> Result>, Error> { let adapter = self - .triggers_adapter(&deployment, &NodeCapabilities {}, unified_api_version) - .expect(&format!("no adapter for network {}", self.name,)); - - let firehose_endpoint = match self.firehose_endpoints.random() { - Some(e) => e.clone(), - None => return Err(anyhow::format_err!("no firehose endpoint available")), - }; + .triggers_adapter( + &deployment, + &EmptyNodeCapabilities::default(), + unified_api_version, + ) + .unwrap_or_else(|_| panic!("no adapter for network {}", self.name)); + let firehose_endpoint = self.firehose_endpoints.random()?; let logger = self .logger_factory .subgraph_logger(&deployment) .new(o!("component" => "FirehoseBlockStream")); - let firehose_mapper = Arc::new(FirehoseMapper { - endpoint: firehose_endpoint.cheap_clone(), - }); + let firehose_mapper = Arc::new(FirehoseMapper {}); Ok(Box::new(FirehoseBlockStream::new( deployment.hash, @@ -156,12 +164,8 @@ impl Blockchain for Chain { logger: &Logger, number: BlockNumber, ) -> Result { - let firehose_endpoint = match self.firehose_endpoints.random() { - Some(e) => e.clone(), - None => return Err(anyhow::format_err!("no firehose endpoint available").into()), - }; - - firehose_endpoint + self.firehose_endpoints + .random()? 
.block_ptr_for_number::(logger, number) .map_err(Into::into) .await @@ -191,7 +195,7 @@ impl TriggersAdapterTrait for TriggersAdapter { async fn triggers_in_block( &self, - _logger: &Logger, + logger: &Logger, block: codec::Block, filter: &TriggerFilter, ) -> Result, Error> { @@ -209,7 +213,7 @@ impl TriggersAdapterTrait for TriggersAdapter { .into_iter() .filter(|tx| transaction_filter.matches(&tx.owner)) .map(|tx| trigger::TransactionWithBlockPtr { - tx: Arc::new(tx.clone()), + tx: Arc::new(tx), block: shared_block.clone(), }) .collect::>(); @@ -223,7 +227,7 @@ impl TriggersAdapterTrait for TriggersAdapter { trigger_data.push(ArweaveTrigger::Block(shared_block.cheap_clone())); } - Ok(BlockWithTriggers::new(block, trigger_data)) + Ok(BlockWithTriggers::new(block, trigger_data, logger)) } async fn is_on_main_chain(&self, _ptr: BlockPtr) -> Result { @@ -249,9 +253,7 @@ impl TriggersAdapterTrait for TriggersAdapter { } } -pub struct FirehoseMapper { - endpoint: Arc, -} +pub struct FirehoseMapper {} #[async_trait] impl FirehoseMapperTrait for FirehoseMapper { @@ -301,11 +303,11 @@ impl FirehoseMapperTrait for FirehoseMapper { )) } - StepIrreversible => { + StepFinal => { panic!("irreversible step is not handled and should not be requested in the Firehose request") } - StepUnknown => { + StepUnset => { panic!("unknown step should not happen in the Firehose response") } } @@ -314,9 +316,10 @@ impl FirehoseMapperTrait for FirehoseMapper { async fn block_ptr_for_number( &self, logger: &Logger, + endpoint: &Arc, number: BlockNumber, ) -> Result { - self.endpoint + endpoint .block_ptr_for_number::(logger, number) .await } @@ -327,6 +330,7 @@ impl FirehoseMapperTrait for FirehoseMapper { async fn final_block_ptr_for( &self, _logger: &Logger, + _endpoint: &Arc, block: &codec::Block, ) -> Result { Ok(block.ptr()) diff --git a/chain/arweave/src/data_source.rs b/chain/arweave/src/data_source.rs index d378174edf8..1b66a3ce635 100644 --- a/chain/arweave/src/data_source.rs +++ b/chain/arweave/src/data_source.rs @@ -11,7 +11,7 @@ use graph::{ }, semver, }; -use std::{convert::TryFrom, sync::Arc}; +use std::sync::Arc; use crate::chain::Chain; use crate::trigger::ArweaveTrigger; @@ -31,6 +31,10 @@ pub struct DataSource { } impl blockchain::DataSource for DataSource { + fn from_template_info(_info: DataSourceTemplateInfo) -> Result { + Err(anyhow!("Arweave subgraphs do not support templates")) + } + // FIXME // // need to decode the base64url encoding? 
@@ -81,7 +85,7 @@ impl blockchain::DataSource for DataSource { } fn network(&self) -> Option<&str> { - self.network.as_ref().map(|s| s.as_str()) + self.network.as_deref() } fn context(&self) -> Arc> { @@ -235,17 +239,6 @@ impl blockchain::UnresolvedDataSource for UnresolvedDataSource { } } -/// # TODO -/// -/// add templates for arweave subgraphs -impl TryFrom> for DataSource { - type Error = Error; - - fn try_from(_info: DataSourceTemplateInfo) -> Result { - Err(anyhow!("Arweave subgraphs do not support templates")) - } -} - #[derive(Clone, Debug, Default, Hash, Eq, PartialEq, Deserialize)] pub struct BaseDataSourceTemplate { pub kind: String, diff --git a/chain/arweave/src/lib.rs b/chain/arweave/src/lib.rs index a497e77bf9d..77e63bc51ab 100644 --- a/chain/arweave/src/lib.rs +++ b/chain/arweave/src/lib.rs @@ -1,5 +1,4 @@ mod adapter; -mod capabilities; mod chain; mod codec; mod data_source; diff --git a/chain/arweave/src/protobuf/sf.arweave.r#type.v1.rs b/chain/arweave/src/protobuf/sf.arweave.r#type.v1.rs index 98a1359305a..fba41614f1b 100644 --- a/chain/arweave/src/protobuf/sf.arweave.r#type.v1.rs +++ b/chain/arweave/src/protobuf/sf.arweave.r#type.v1.rs @@ -1,141 +1,146 @@ +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct BigInt { - #[prost(bytes="vec", tag="1")] + #[prost(bytes = "vec", tag = "1")] pub bytes: ::prost::alloc::vec::Vec, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Block { /// Firehose block version (unrelated to Arweave block version) - #[prost(uint32, tag="1")] + #[prost(uint32, tag = "1")] pub ver: u32, /// The block identifier - #[prost(bytes="vec", tag="2")] + #[prost(bytes = "vec", tag = "2")] pub indep_hash: ::prost::alloc::vec::Vec, /// The nonce chosen to solve the mining problem - #[prost(bytes="vec", tag="3")] + #[prost(bytes = "vec", tag = "3")] pub nonce: ::prost::alloc::vec::Vec, /// `indep_hash` of the previous block in the weave - #[prost(bytes="vec", tag="4")] + #[prost(bytes = "vec", tag = "4")] pub previous_block: ::prost::alloc::vec::Vec, /// POSIX time of block discovery - #[prost(uint64, tag="5")] + #[prost(uint64, tag = "5")] pub timestamp: u64, /// POSIX time of the last difficulty retarget - #[prost(uint64, tag="6")] + #[prost(uint64, tag = "6")] pub last_retarget: u64, /// Mining difficulty; the number `hash` must be greater than. - #[prost(message, optional, tag="7")] + #[prost(message, optional, tag = "7")] pub diff: ::core::option::Option, /// How many blocks have passed since the genesis block - #[prost(uint64, tag="8")] + #[prost(uint64, tag = "8")] pub height: u64, /// Mining solution hash of the block; must satisfy the mining difficulty - #[prost(bytes="vec", tag="9")] + #[prost(bytes = "vec", tag = "9")] pub hash: ::prost::alloc::vec::Vec, /// Merkle root of the tree of Merkle roots of block's transactions' data. - #[prost(bytes="vec", tag="10")] + #[prost(bytes = "vec", tag = "10")] pub tx_root: ::prost::alloc::vec::Vec, /// Transactions contained within this block - #[prost(message, repeated, tag="11")] + #[prost(message, repeated, tag = "11")] pub txs: ::prost::alloc::vec::Vec, /// The root hash of the Merkle Patricia Tree containing /// all wallet (account) balances and the identifiers /// of the last transactions posted by them; if any. - #[prost(bytes="vec", tag="12")] + #[prost(bytes = "vec", tag = "12")] pub wallet_list: ::prost::alloc::vec::Vec, /// (string or) Address of the account to receive the block rewards. 
Can also be unclaimed which is encoded as a null byte - #[prost(bytes="vec", tag="13")] + #[prost(bytes = "vec", tag = "13")] pub reward_addr: ::prost::alloc::vec::Vec, /// Tags that a block producer can add to a block - #[prost(message, repeated, tag="14")] + #[prost(message, repeated, tag = "14")] pub tags: ::prost::alloc::vec::Vec, /// Size of reward pool - #[prost(message, optional, tag="15")] + #[prost(message, optional, tag = "15")] pub reward_pool: ::core::option::Option, /// Size of the weave in bytes - #[prost(message, optional, tag="16")] + #[prost(message, optional, tag = "16")] pub weave_size: ::core::option::Option, /// Size of this block in bytes - #[prost(message, optional, tag="17")] + #[prost(message, optional, tag = "17")] pub block_size: ::core::option::Option, /// Required after the version 1.8 fork. Zero otherwise. /// The sum of the average number of hashes computed /// by the network to produce the past blocks including this one. - #[prost(message, optional, tag="18")] + #[prost(message, optional, tag = "18")] pub cumulative_diff: ::core::option::Option, /// Required after the version 1.8 fork. Null byte otherwise. /// The Merkle root of the block index - the list of {`indep_hash`; `weave_size`; `tx_root`} triplets - #[prost(bytes="vec", tag="20")] + #[prost(bytes = "vec", tag = "20")] pub hash_list_merkle: ::prost::alloc::vec::Vec, /// The proof of access; Used after v2.4 only; set as defaults otherwise - #[prost(message, optional, tag="21")] + #[prost(message, optional, tag = "21")] pub poa: ::core::option::Option, } /// A succinct proof of access to a recall byte found in a TX +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ProofOfAccess { /// The recall byte option chosen; global offset of index byte - #[prost(string, tag="1")] + #[prost(string, tag = "1")] pub option: ::prost::alloc::string::String, /// The path through the Merkle tree of transactions' `data_root`s; /// from the `data_root` being proven to the corresponding `tx_root` - #[prost(bytes="vec", tag="2")] + #[prost(bytes = "vec", tag = "2")] pub tx_path: ::prost::alloc::vec::Vec, /// The path through the Merkle tree of identifiers of chunks of the /// corresponding transaction; from the chunk being proven to the /// corresponding `data_root`. - #[prost(bytes="vec", tag="3")] + #[prost(bytes = "vec", tag = "3")] pub data_path: ::prost::alloc::vec::Vec, /// The data chunk. - #[prost(bytes="vec", tag="4")] + #[prost(bytes = "vec", tag = "4")] pub chunk: ::prost::alloc::vec::Vec, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Transaction { /// 1 or 2 for v1 or v2 transactions. More allowable in the future - #[prost(uint32, tag="1")] + #[prost(uint32, tag = "1")] pub format: u32, /// The transaction identifier. - #[prost(bytes="vec", tag="2")] + #[prost(bytes = "vec", tag = "2")] pub id: ::prost::alloc::vec::Vec, /// Either the identifier of the previous transaction from the same /// wallet or the identifier of one of the last ?MAX_TX_ANCHOR_DEPTH blocks. - #[prost(bytes="vec", tag="3")] + #[prost(bytes = "vec", tag = "3")] pub last_tx: ::prost::alloc::vec::Vec, /// The public key the transaction is signed with. 
- #[prost(bytes="vec", tag="4")] + #[prost(bytes = "vec", tag = "4")] pub owner: ::prost::alloc::vec::Vec, /// A list of arbitrary key-value pairs - #[prost(message, repeated, tag="5")] + #[prost(message, repeated, tag = "5")] pub tags: ::prost::alloc::vec::Vec, /// The address of the recipient; if any. The SHA2-256 hash of the public key. - #[prost(bytes="vec", tag="6")] + #[prost(bytes = "vec", tag = "6")] pub target: ::prost::alloc::vec::Vec, /// The amount of Winstons to send to the recipient; if any. - #[prost(message, optional, tag="7")] + #[prost(message, optional, tag = "7")] pub quantity: ::core::option::Option, /// The data to upload; if any. For v2 transactions; the field is optional /// - a fee is charged based on the `data_size` field; - /// data may be uploaded any time later in chunks. - #[prost(bytes="vec", tag="8")] + /// data may be uploaded any time later in chunks. + #[prost(bytes = "vec", tag = "8")] pub data: ::prost::alloc::vec::Vec, /// Size in bytes of the transaction data. - #[prost(message, optional, tag="9")] + #[prost(message, optional, tag = "9")] pub data_size: ::core::option::Option, /// The Merkle root of the Merkle tree of data chunks. - #[prost(bytes="vec", tag="10")] + #[prost(bytes = "vec", tag = "10")] pub data_root: ::prost::alloc::vec::Vec, /// The signature. - #[prost(bytes="vec", tag="11")] + #[prost(bytes = "vec", tag = "11")] pub signature: ::prost::alloc::vec::Vec, /// The fee in Winstons. - #[prost(message, optional, tag="12")] + #[prost(message, optional, tag = "12")] pub reward: ::core::option::Option, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Tag { - #[prost(bytes="vec", tag="1")] + #[prost(bytes = "vec", tag = "1")] pub name: ::prost::alloc::vec::Vec, - #[prost(bytes="vec", tag="2")] + #[prost(bytes = "vec", tag = "2")] pub value: ::prost::alloc::vec::Vec, } diff --git a/chain/arweave/src/runtime/abi.rs b/chain/arweave/src/runtime/abi.rs index 4f3d4005dc8..c7fe7f354f1 100644 --- a/chain/arweave/src/runtime/abi.rs +++ b/chain/arweave/src/runtime/abi.rs @@ -26,7 +26,7 @@ impl ToAscObj for Vec> { gas: &GasCounter, ) -> Result { let content = self - .into_iter() + .iter() .map(|x| asc_new(heap, x.as_slice(), gas)) .collect::>, _>>()?; Ok(AscTransactionArray(Array::new(&*content, heap, gas)?)) @@ -128,7 +128,7 @@ impl ToAscObj for codec::Block { &self .txs .iter() - .map(|tx| tx.id.clone().into()) + .map(|tx| tx.id.clone()) .collect::>>(), gas, )?, diff --git a/chain/common/Cargo.toml b/chain/common/Cargo.toml index 364e8e33a12..7ebb131d62e 100644 --- a/chain/common/Cargo.toml +++ b/chain/common/Cargo.toml @@ -1,12 +1,12 @@ [package] name = "graph-chain-common" -version = "0.27.0" -edition = "2021" +version.workspace = true +edition.workspace = true # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] protobuf = "3.0.2" -protobuf-parse = "3.0.2" +protobuf-parse = "3.2.0" anyhow = "1" heck = "0.4" diff --git a/chain/common/src/lib.rs b/chain/common/src/lib.rs index f1e5aeb504c..95ae6ab7e34 100644 --- a/chain/common/src/lib.rs +++ b/chain/common/src/lib.rs @@ -12,7 +12,7 @@ use protobuf::UnknownValueRef; use std::convert::From; use std::path::Path; -const REQUIRED_ID: u32 = 66001; +const REQUIRED_ID: u32 = 77001; #[derive(Debug, Clone)] pub struct Field { @@ -41,7 +41,7 @@ impl PType { v.push(vv); } - if v.len() < 1 { + if v.is_empty() { None } else { Some(v.join(",")) @@ -103,23 +103,20 @@ impl From<&FieldDescriptorProto> 
for Field { let type_name = if let Some(type_name) = fd.type_name.as_ref() { type_name.to_owned() + } else if let Type::TYPE_BYTES = fd.type_() { + "Vec".to_owned() } else { - if let Type::TYPE_BYTES = fd.type_() { - "Vec".to_owned() - } else { - use heck::ToUpperCamelCase; - fd.name().to_string().to_upper_camel_case() - } + use heck::ToUpperCamelCase; + fd.name().to_string().to_upper_camel_case() }; Field { name: fd.name().to_owned(), - type_name: type_name.rsplit(".").next().unwrap().to_owned(), + type_name: type_name.rsplit('.').next().unwrap().to_owned(), required: options .iter() //(firehose.required) = true, UnknownValueRef::Varint(0) => false, UnknownValueRef::Varint(1) => true - .find(|f| f.0 == REQUIRED_ID && UnknownValueRef::Varint(1) == f.1) - .is_some(), + .any(|f| f.0 == REQUIRED_ID && UnknownValueRef::Varint(1) == f.1), is_enum: false, is_array: Label::LABEL_REPEATED == fd.label(), fields: vec![], @@ -154,7 +151,7 @@ impl From<&DescriptorProto> for PType { .iter() .filter(|fd| fd.oneof_index.is_some()) .filter(|fd| *fd.oneof_index.as_ref().unwrap() as usize == index) - .map(|fd| Field::from(fd)) + .map(Field::from) .collect::>(); fld @@ -165,7 +162,7 @@ impl From<&DescriptorProto> for PType { dp.field .iter() .filter(|fd| fd.oneof_index.is_none()) - .map(|fd| Field::from(fd)) + .map(Field::from) .collect::>(), ); diff --git a/chain/common/tests/resources/firehose/annotations.proto b/chain/common/tests/resources/firehose/annotations.proto index 2541e7f3576..1476c1ab08d 100644 --- a/chain/common/tests/resources/firehose/annotations.proto +++ b/chain/common/tests/resources/firehose/annotations.proto @@ -1,9 +1,11 @@ +syntax = "proto3"; + package firehose; -import "google/protobuf/descriptor.proto"; +option go_package = "github.com/streamingfast/pbgo/sf/firehose/v1;pbfirehose"; -// 66K range is arbitrary picked number, might be conflict +import "google/protobuf/descriptor.proto"; extend google.protobuf.FieldOptions { - optional bool required = 66001; + optional bool required = 77001; } diff --git a/chain/cosmos/Cargo.toml b/chain/cosmos/Cargo.toml index de085f5e27a..c932b5185ee 100644 --- a/chain/cosmos/Cargo.toml +++ b/chain/cosmos/Cargo.toml @@ -1,19 +1,19 @@ [package] name = "graph-chain-cosmos" -version = "0.27.0" +version.workspace = true edition = "2018" [build-dependencies] -tonic-build = { version = "0.7.1", features = ["prost"] } +tonic-build = { workspace = true } graph-chain-common = { path = "../common" } [dependencies] graph = { path = "../../graph" } -prost = "0.10.1" -prost-types = "0.10.1" +prost = { workspace = true } +prost-types = { workspace = true } serde = "1.0" anyhow = "1.0" -semver = "1.0.3" +semver = "1.0.16" graph-runtime-wasm = { path = "../../runtime/wasm" } graph-runtime-derive = { path = "../../runtime/derive" } diff --git a/chain/cosmos/proto/firehose/annotations.proto b/chain/cosmos/proto/firehose/annotations.proto index 2541e7f3576..1476c1ab08d 100644 --- a/chain/cosmos/proto/firehose/annotations.proto +++ b/chain/cosmos/proto/firehose/annotations.proto @@ -1,9 +1,11 @@ +syntax = "proto3"; + package firehose; -import "google/protobuf/descriptor.proto"; +option go_package = "github.com/streamingfast/pbgo/sf/firehose/v1;pbfirehose"; -// 66K range is arbitrary picked number, might be conflict +import "google/protobuf/descriptor.proto"; extend google.protobuf.FieldOptions { - optional bool required = 66001; + optional bool required = 77001; } diff --git a/chain/cosmos/proto/type.proto b/chain/cosmos/proto/type.proto index 19db384e1dc..c32502da1e9 
100644 --- a/chain/cosmos/proto/type.proto +++ b/chain/cosmos/proto/type.proto @@ -33,6 +33,7 @@ message HeaderOnlyBlock { message EventData { Event event = 1 [(firehose.required) = true]; HeaderOnlyBlock block = 2 [(firehose.required) = true]; + TransactionContext tx = 3; } message TransactionData { @@ -40,6 +41,20 @@ message TransactionData { HeaderOnlyBlock block = 2 [(firehose.required) = true]; } +message MessageData { + google.protobuf.Any message = 1 [(firehose.required) = true]; + HeaderOnlyBlock block = 2 [(firehose.required) = true]; + TransactionContext tx = 3 [(firehose.required) = true]; +} + +message TransactionContext { + bytes hash = 1; + uint32 index = 2; + uint32 code = 3; + int64 gas_wanted = 4; + int64 gas_used = 5; +} + message Header { Consensus version = 1 [(gogoproto.nullable) = false]; string chain_id = 2 [(gogoproto.customname) = "ChainID"]; diff --git a/chain/cosmos/src/adapter.rs b/chain/cosmos/src/adapter.rs index d73b8b0ec55..746c91e2e07 100644 --- a/chain/cosmos/src/adapter.rs +++ b/chain/cosmos/src/adapter.rs @@ -3,7 +3,6 @@ use std::collections::HashSet; use prost::Message; use prost_types::Any; -use crate::capabilities::NodeCapabilities; use crate::{data_source::DataSource, Chain}; use graph::blockchain as bc; use graph::firehose::EventTypeFilter; @@ -25,8 +24,8 @@ impl bc::TriggerFilter for TriggerFilter { self.block_filter.extend_from_data_sources(data_sources); } - fn node_capabilities(&self) -> NodeCapabilities { - NodeCapabilities {} + fn node_capabilities(&self) -> bc::EmptyNodeCapabilities { + bc::EmptyNodeCapabilities::default() } fn extend_with_template( diff --git a/chain/cosmos/src/capabilities.rs b/chain/cosmos/src/capabilities.rs deleted file mode 100644 index 89058123534..00000000000 --- a/chain/cosmos/src/capabilities.rs +++ /dev/null @@ -1,33 +0,0 @@ -use std::cmp::PartialOrd; -use std::fmt; -use std::str::FromStr; - -use anyhow::Error; -use graph::impl_slog_value; - -use crate::DataSource; - -#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd)] -pub struct NodeCapabilities {} - -impl FromStr for NodeCapabilities { - type Err = Error; - - fn from_str(_s: &str) -> Result { - Ok(NodeCapabilities {}) - } -} - -impl fmt::Display for NodeCapabilities { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.write_str("cosmos") - } -} - -impl_slog_value!(NodeCapabilities, "{}"); - -impl graph::blockchain::NodeCapabilities for NodeCapabilities { - fn from_data_sources(_data_sources: &[DataSource]) -> Self { - NodeCapabilities {} - } -} diff --git a/chain/cosmos/src/chain.rs b/chain/cosmos/src/chain.rs index 9e6254662ed..cd6e95572f8 100644 --- a/chain/cosmos/src/chain.rs +++ b/chain/cosmos/src/chain.rs @@ -5,15 +5,14 @@ use graph::cheap_clone::CheapClone; use graph::data::subgraph::UnifiedMappingApiVersion; use graph::prelude::MetricsRegistry; use graph::{ - anyhow::anyhow, blockchain::{ block_stream::{ BlockStream, BlockStreamEvent, BlockWithTriggers, FirehoseError, FirehoseMapper as FirehoseMapperTrait, TriggersAdapter as TriggersAdapterTrait, }, firehose_block_stream::FirehoseBlockStream, - Block as _, BlockHash, BlockPtr, Blockchain, BlockchainKind, IngestorError, - RuntimeAdapter as RuntimeAdapterTrait, + Block as _, BlockHash, BlockPtr, Blockchain, BlockchainKind, EmptyNodeCapabilities, + IngestorError, RuntimeAdapter as RuntimeAdapterTrait, }, components::store::DeploymentLocator, firehose::{self, FirehoseEndpoint, FirehoseEndpoints, ForkStep}, @@ -21,7 +20,6 @@ use graph::{ }; use prost::Message; -use 
diff --git a/chain/cosmos/src/chain.rs b/chain/cosmos/src/chain.rs
index 9e6254662ed..cd6e95572f8 100644
--- a/chain/cosmos/src/chain.rs
+++ b/chain/cosmos/src/chain.rs
@@ -5,15 +5,14 @@ use graph::cheap_clone::CheapClone;
 use graph::data::subgraph::UnifiedMappingApiVersion;
 use graph::prelude::MetricsRegistry;
 use graph::{
-    anyhow::anyhow,
     blockchain::{
         block_stream::{
             BlockStream, BlockStreamEvent, BlockWithTriggers, FirehoseError,
             FirehoseMapper as FirehoseMapperTrait, TriggersAdapter as TriggersAdapterTrait,
         },
         firehose_block_stream::FirehoseBlockStream,
-        Block as _, BlockHash, BlockPtr, Blockchain, BlockchainKind, IngestorError,
-        RuntimeAdapter as RuntimeAdapterTrait,
+        Block as _, BlockHash, BlockPtr, Blockchain, BlockchainKind, EmptyNodeCapabilities,
+        IngestorError, RuntimeAdapter as RuntimeAdapterTrait,
     },
     components::store::DeploymentLocator,
     firehose::{self, FirehoseEndpoint, FirehoseEndpoints, ForkStep},
@@ -21,7 +20,6 @@ use graph::{
 };
 use prost::Message;
 
-use crate::capabilities::NodeCapabilities;
 use crate::data_source::{
     DataSource, DataSourceTemplate, EventOrigin, UnresolvedDataSource,
     UnresolvedDataSourceTemplate,
 };
@@ -81,7 +79,18 @@ impl Blockchain for Chain {
 
     type TriggerFilter = TriggerFilter;
 
-    type NodeCapabilities = NodeCapabilities;
+    type NodeCapabilities = EmptyNodeCapabilities<Self>;
+
+    fn is_refetch_block_required(&self) -> bool {
+        false
+    }
+    async fn refetch_firehose_block(
+        &self,
+        _logger: &Logger,
+        _cursor: FirehoseCursor,
+    ) -> Result<codec::Block, Error> {
+        unimplemented!("This chain does not support Dynamic Data Sources. is_refetch_block_required always returns false, this shouldn't be called.")
+    }
 
     fn triggers_adapter(
         &self,
@@ -103,22 +112,21 @@ impl Blockchain for Chain {
         unified_api_version: UnifiedMappingApiVersion,
     ) -> Result<Box<dyn BlockStream<Self>>, Error> {
         let adapter = self
-            .triggers_adapter(&deployment, &NodeCapabilities {}, unified_api_version)
+            .triggers_adapter(
+                &deployment,
+                &EmptyNodeCapabilities::default(),
+                unified_api_version,
+            )
             .unwrap_or_else(|_| panic!("no adapter for network {}", self.name));
 
-        let firehose_endpoint = match self.firehose_endpoints.random() {
-            Some(e) => e.clone(),
-            None => return Err(anyhow!("no firehose endpoint available",)),
-        };
+        let firehose_endpoint = self.firehose_endpoints.random()?;
 
         let logger = self
             .logger_factory
             .subgraph_logger(&deployment)
             .new(o!("component" => "FirehoseBlockStream"));
 
-        let firehose_mapper = Arc::new(FirehoseMapper {
-            endpoint: firehose_endpoint.cheap_clone(),
-        });
+        let firehose_mapper = Arc::new(FirehoseMapper {});
 
         Ok(Box::new(FirehoseBlockStream::new(
             deployment.hash,
@@ -154,10 +162,7 @@ impl Blockchain for Chain {
         logger: &Logger,
         number: BlockNumber,
     ) -> Result<BlockPtr, IngestorError> {
-        let firehose_endpoint = match self.firehose_endpoints.random() {
-            Some(e) => e.clone(),
-            None => return Err(anyhow!("no firehose endpoint available").into()),
-        };
+        let firehose_endpoint = self.firehose_endpoints.random()?;
 
         firehose_endpoint
             .block_ptr_for_number::<codec::Block>(logger, number)
             .await
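Both endpoint lookups in the `Blockchain` impl above collapse from a `match` over an `Option` to a single `?` because endpoint selection itself now returns a `Result`. A sketch of the shape of that change with stand-in types (the real `FirehoseEndpoints::random` lives in the `graph` crate; `first()` below stands in for random selection to keep the sketch dependency-free):

```rust
use anyhow::{anyhow, Error};

struct Endpoints(Vec<String>);

impl Endpoints {
    // After: the "no endpoint" error is produced in exactly one place...
    fn random(&self) -> Result<&String, Error> {
        self.0
            .first() // a real implementation would pick randomly
            .ok_or_else(|| anyhow!("no firehose endpoint available"))
    }
}

// ...so callers shrink to `endpoints.random()?` instead of matching on an
// Option and rebuilding the same error at every call site.
fn use_endpoint(endpoints: &Endpoints) -> Result<(), Error> {
    let _endpoint = endpoints.random()?;
    Ok(())
}
```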
@@ -178,6 +183,14 @@ pub struct TriggersAdapter {}
 
 #[async_trait]
 impl TriggersAdapterTrait<Chain> for TriggersAdapter {
+    async fn ancestor_block(
+        &self,
+        _ptr: BlockPtr,
+        _offset: BlockNumber,
+    ) -> Result<Option<codec::Block>, Error> {
+        panic!("Should never be called since not used by FirehoseBlockStream")
+    }
+
     async fn scan_triggers(
         &self,
         _from: BlockNumber,
@@ -189,7 +202,7 @@ impl TriggersAdapterTrait<Chain> for TriggersAdapter {
 
     async fn triggers_in_block(
         &self,
-        _logger: &Logger,
+        logger: &Logger,
         block: codec::Block,
         filter: &TriggerFilter,
     ) -> Result<BlockWithTriggers<Chain>, Error> {
@@ -204,10 +217,30 @@ impl TriggersAdapterTrait<Chain> for TriggersAdapter {
                 // block. This is not currently possible because EventData is automatically
                 // generated.
                 .filter_map(|event| {
-                    filter_event_trigger(filter, event, &header_only_block, EventOrigin::BeginBlock)
+                    filter_event_trigger(
+                        filter,
+                        event,
+                        &header_only_block,
+                        None,
+                        EventOrigin::BeginBlock,
+                    )
                 })
-                .chain(shared_block.tx_events()?.cloned().filter_map(|event| {
-                    filter_event_trigger(filter, event, &header_only_block, EventOrigin::DeliverTx)
+                .chain(shared_block.transactions().flat_map(|tx| {
+                    tx.result
+                        .as_ref()
+                        .unwrap()
+                        .events
+                        .iter()
+                        .filter_map(|e| {
+                            filter_event_trigger(
+                                filter,
+                                e.clone(),
+                                &header_only_block,
+                                Some(build_tx_context(tx)),
+                                EventOrigin::DeliverTx,
+                            )
+                        })
+                        .collect::<Vec<_>>()
                 }))
                 .chain(
                     shared_block
@@ -218,38 +251,44 @@ impl TriggersAdapterTrait<Chain> for TriggersAdapter {
                             filter,
                             event,
                             &header_only_block,
+                            None,
                             EventOrigin::EndBlock,
                         )
                     }),
                 )
                 .collect();
 
-        triggers.extend(
-            shared_block
-                .transactions()
-                .cloned()
-                .map(|tx| CosmosTrigger::with_transaction(tx, header_only_block.clone())),
-        );
+        triggers.extend(shared_block.transactions().cloned().flat_map(|tx_result| {
+            let mut triggers: Vec<_> = Vec::new();
+            if let Some(tx) = tx_result.tx.clone() {
+                if let Some(tx_body) = tx.body {
+                    triggers.extend(tx_body.messages.into_iter().map(|message| {
+                        CosmosTrigger::with_message(
+                            message,
+                            header_only_block.clone(),
+                            build_tx_context(&tx_result),
+                        )
+                    }));
+                }
+            }
+            triggers.push(CosmosTrigger::with_transaction(
+                tx_result,
+                header_only_block.clone(),
+            ));
+            triggers
+        }));
 
         if filter.block_filter.trigger_every_block {
             triggers.push(CosmosTrigger::Block(shared_block.cheap_clone()));
         }
 
-        Ok(BlockWithTriggers::new(block, triggers))
+        Ok(BlockWithTriggers::new(block, triggers, logger))
     }
 
     async fn is_on_main_chain(&self, _ptr: BlockPtr) -> Result<bool, Error> {
         panic!("Should never be called since not used by FirehoseBlockStream")
     }
 
-    async fn ancestor_block(
-        &self,
-        _ptr: BlockPtr,
-        _offset: BlockNumber,
-    ) -> Result<Option<codec::Block>, Error> {
-        panic!("Should never be called since not used by FirehoseBlockStream")
-    }
-
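The `flat_map` over `shared_block.transactions()` above is the heart of message support: each transaction now contributes one `Message` trigger per entry in `tx.body.messages`, followed by the `Transaction` trigger that was already emitted before this change. A self-contained sketch of that fan-out with stand-in types:

```rust
// Stand-ins for the codec types used above.
struct Tx {
    messages: Vec<String>,
}

#[derive(Debug, PartialEq)]
enum Trigger {
    Message(String),
    Transaction(usize),
}

// One Message trigger per tx body message, then the Transaction trigger.
fn expand(txs: &[Tx]) -> Vec<Trigger> {
    txs.iter()
        .enumerate()
        .flat_map(|(index, tx)| {
            let mut out: Vec<Trigger> =
                tx.messages.iter().cloned().map(Trigger::Message).collect();
            out.push(Trigger::Transaction(index));
            out
        })
        .collect()
}

fn main() {
    let txs = vec![Tx { messages: vec!["m0".into(), "m1".into()] }];
    assert_eq!(
        expand(&txs),
        vec![
            Trigger::Message("m0".into()),
            Trigger::Message("m1".into()),
            Trigger::Transaction(0),
        ]
    );
}
```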
     /// Panics if `block` is genesis.
     /// But that's ok since this is only called when reverting `block`.
     async fn parent_ptr(&self, block: &BlockPtr) -> Result<Option<BlockPtr>, Error> {
@@ -265,19 +304,33 @@ fn filter_event_trigger(
     filter: &TriggerFilter,
     event: codec::Event,
     block: &codec::HeaderOnlyBlock,
+    tx_context: Option<codec::TransactionContext>,
     origin: EventOrigin,
 ) -> Option<CosmosTrigger> {
     if filter.event_type_filter.matches(&event.event_type) {
-        Some(CosmosTrigger::with_event(event, block.clone(), origin))
+        Some(CosmosTrigger::with_event(
+            event,
+            block.clone(),
+            tx_context,
+            origin,
+        ))
     } else {
         None
     }
 }
 
-pub struct FirehoseMapper {
-    endpoint: Arc<FirehoseEndpoint>,
+fn build_tx_context(tx: &codec::TxResult) -> codec::TransactionContext {
+    codec::TransactionContext {
+        hash: tx.hash.clone(),
+        index: tx.index,
+        code: tx.result.as_ref().unwrap().code,
+        gas_wanted: tx.result.as_ref().unwrap().gas_wanted,
+        gas_used: tx.result.as_ref().unwrap().gas_used,
+    }
 }
 
+pub struct FirehoseMapper {}
+
 #[async_trait]
 impl FirehoseMapperTrait<Chain> for FirehoseMapper {
     async fn to_block_stream_event(
@@ -326,11 +379,13 @@ impl FirehoseMapperTrait<Chain> for FirehoseMapper {
             ))
         }
 
-        ForkStep::StepIrreversible => {
-            panic!("irreversible step is not handled and should not be requested in the Firehose request")
+        ForkStep::StepFinal => {
+            panic!(
+                "final step is not handled and should not be requested in the Firehose request"
+            )
         }
 
-        ForkStep::StepUnknown => {
+        ForkStep::StepUnset => {
             panic!("unknown step should not happen in the Firehose response")
         }
     }
@@ -339,9 +394,10 @@ impl FirehoseMapperTrait<Chain> for FirehoseMapper {
     async fn block_ptr_for_number(
         &self,
         logger: &Logger,
+        endpoint: &Arc<FirehoseEndpoint>,
         number: BlockNumber,
     ) -> Result<BlockPtr, Error> {
-        self.endpoint
+        endpoint
             .block_ptr_for_number::<codec::Block>(logger, number)
             .await
     }
@@ -349,11 +405,11 @@ impl FirehoseMapperTrait<Chain> for FirehoseMapper {
     async fn final_block_ptr_for(
         &self,
         logger: &Logger,
+        endpoint: &Arc<FirehoseEndpoint>,
         block: &codec::Block,
     ) -> Result<BlockPtr, Error> {
         // Cosmos provides instant block finality.
-        self.endpoint
-            .block_ptr_for_number::<codec::Block>(logger, block.number())
+        self.block_ptr_for_number(logger, endpoint, block.number())
             .await
     }
 }
@@ -408,16 +464,19 @@ mod test {
             CosmosTrigger::with_event(
                 Event::test_with_type("begin_event_3"),
                 header_only_block.clone(),
+                None,
                 EventOrigin::BeginBlock,
             ),
             CosmosTrigger::with_event(
                 Event::test_with_type("tx_event_3"),
                 header_only_block.clone(),
+                Some(build_tx_context(&block_with_events.transactions[2])),
                 EventOrigin::DeliverTx,
             ),
             CosmosTrigger::with_event(
                 Event::test_with_type("end_event_3"),
                 header_only_block.clone(),
+                None,
                 EventOrigin::EndBlock,
             ),
             CosmosTrigger::with_transaction(
@@ -442,16 +501,19 @@ mod test {
             CosmosTrigger::with_event(
                 Event::test_with_type("begin_event_3"),
                 header_only_block.clone(),
+                None,
                 EventOrigin::BeginBlock,
             ),
             CosmosTrigger::with_event(
                 Event::test_with_type("tx_event_2"),
                 header_only_block.clone(),
+                Some(build_tx_context(&block_with_events.transactions[1])),
                 EventOrigin::DeliverTx,
             ),
             CosmosTrigger::with_event(
                 Event::test_with_type("end_event_1"),
                 header_only_block.clone(),
+                None,
                 EventOrigin::EndBlock,
             ),
             CosmosTrigger::with_transaction(
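`build_tx_context` above copies a transaction's identity (`hash`, `index`) and outcome (`code` plus the gas figures) into the context that now rides along with every `DeliverTx` event and message trigger. Note that it `unwrap()`s `tx.result`, whereas the deleted `tx_events` helper used to surface a missing result as an error. A defensive variant, assuming the prost-generated `codec` types from this diff are in scope, could look like:

```rust
use crate::codec::{TransactionContext, TxResult};

// Like build_tx_context, but surfaces a missing result instead of panicking.
fn try_build_tx_context(tx: &TxResult) -> Option<TransactionContext> {
    let result = tx.result.as_ref()?;
    Some(TransactionContext {
        hash: tx.hash.clone(),
        index: tx.index,
        code: result.code,
        gas_wanted: result.gas_wanted,
        gas_used: result.gas_used,
    })
}
```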
diff --git a/chain/cosmos/src/codec.rs b/chain/cosmos/src/codec.rs
index 7b2a2997f90..fae145a449e 100644
--- a/chain/cosmos/src/codec.rs
+++ b/chain/cosmos/src/codec.rs
@@ -15,15 +15,6 @@ impl Block {
             .ok_or_else(|| anyhow!("block data missing header field"))
     }
 
-    pub fn events(&self) -> Result<impl Iterator<Item = &Event>, Error> {
-        let events = self
-            .begin_block_events()?
-            .chain(self.tx_events()?)
-            .chain(self.end_block_events()?);
-
-        Ok(events)
-    }
-
     pub fn begin_block_events(&self) -> Result<impl Iterator<Item = &Event>, Error> {
         let events = self
             .result_begin_block
@@ -35,22 +26,6 @@ impl Block {
         Ok(events)
     }
 
-    pub fn tx_events(&self) -> Result<impl Iterator<Item = &Event>, Error> {
-        if self.transactions.iter().any(|tx| tx.result.is_none()) {
-            return Err(anyhow!("block data transaction missing result field"));
-        }
-
-        let events = self.transactions.iter().flat_map(|tx| {
-            tx.result
-                .as_ref()
-                .map(|b| b.events.iter())
-                .into_iter()
-                .flatten()
-        });
-
-        Ok(events)
-    }
-
     pub fn end_block_events(&self) -> Result<impl Iterator<Item = &Event>, Error> {
         let events = self
             .result_end_block
@@ -197,3 +172,17 @@ impl TransactionData {
             .ok_or_else(|| anyhow!("transaction data missing block field"))
     }
 }
+
+impl MessageData {
+    pub fn message(&self) -> Result<&prost_types::Any, Error> {
+        self.message
+            .as_ref()
+            .ok_or_else(|| anyhow!("message data missing message field"))
+    }
+
+    pub fn block(&self) -> Result<&HeaderOnlyBlock, Error> {
+        self.block
+            .as_ref()
+            .ok_or_else(|| anyhow!("message data missing block field"))
+    }
+}
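The `data_source.rs` changes that follow key message handlers on the protobuf type URL carried by `google.protobuf.Any`, so dispatching a message trigger is a plain string comparison; the new validation below rejects duplicate type URLs, which keeps the first match unambiguous. A minimal sketch with stand-in structs (the example URL is only illustrative):

```rust
#[derive(Clone)]
struct MappingMessageHandler {
    message: String, // protobuf type URL, e.g. "/cosmos.bank.v1beta1.MsgSend"
    handler: String, // exported mapping function name
}

struct Any {
    type_url: String,
}

// Mirrors DataSource::handler_for_message: the first handler whose configured
// type URL equals the incoming message's type URL.
fn handler_for_message(
    handlers: &[MappingMessageHandler],
    message: &Any,
) -> Option<MappingMessageHandler> {
    handlers.iter().find(|h| h.message == message.type_url).cloned()
}
```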
diff --git a/chain/cosmos/src/data_source.rs b/chain/cosmos/src/data_source.rs
index 7535a50668e..3d6043b41bc 100644
--- a/chain/cosmos/src/data_source.rs
+++ b/chain/cosmos/src/data_source.rs
@@ -1,5 +1,5 @@
 use std::collections::{HashMap, HashSet};
-use std::{convert::TryFrom, sync::Arc};
+use std::sync::Arc;
 
 use anyhow::{Error, Result};
 
@@ -37,6 +37,10 @@ pub struct DataSource {
 }
 
 impl blockchain::DataSource<Chain> for DataSource {
+    fn from_template_info(_template_info: DataSourceTemplateInfo<Chain>) -> Result<Self, Error> {
+        Err(anyhow!(TEMPLATE_ERROR))
+    }
+
     fn address(&self) -> Option<&[u8]> {
         None
     }
@@ -72,6 +76,13 @@ impl blockchain::DataSource<Chain> for DataSource {
                 Some(handler) => handler.handler,
                 None => return Ok(None),
             },
+
+            CosmosTrigger::Message(message_data) => {
+                match self.handler_for_message(message_data.message()?) {
+                    Some(handler) => handler.handler,
+                    None => return Ok(None),
+                }
+            }
         };
 
         Ok(Some(TriggerWithHandler::<Chain>::new(
@@ -125,6 +136,7 @@ impl blockchain::DataSource<Chain> for DataSource {
             && mapping.block_handlers == other.mapping.block_handlers
             && mapping.event_handlers == other.mapping.event_handlers
             && mapping.transaction_handlers == other.mapping.transaction_handlers
+            && mapping.message_handlers == other.mapping.message_handlers
             && context == &other.context
     }
 
@@ -184,10 +196,16 @@
         // OR
        // 1 or more handlers with origin filter
         for (event_type, origins) in event_types.iter() {
-            if origins.len() > 1 {
-                if !origins.iter().all(Option::is_some) {
-                    errors.push(combined_origins_err(event_type))
-                }
+            if origins.len() > 1 && !origins.iter().all(Option::is_some) {
+                errors.push(combined_origins_err(event_type))
+            }
+        }
+
+        // Ensure each message handler is unique
+        let mut message_type_urls = HashSet::with_capacity(self.mapping.message_handlers.len());
+        for message_handler in self.mapping.message_handlers.iter() {
+            if !message_type_urls.insert(message_handler.message.clone()) {
+                errors.push(duplicate_url_type(&message_handler.message))
             }
         }
 
@@ -234,6 +252,14 @@ impl DataSource {
         self.mapping.transaction_handlers.first().cloned()
     }
 
+    fn handler_for_message(&self, message: &::prost_types::Any) -> Option<MappingMessageHandler> {
+        self.mapping
+            .message_handlers
+            .iter()
+            .find(|handler| handler.message == message.type_url)
+            .cloned()
+    }
+
     fn handler_for_event(
         &self,
         event: &codec::Event,
@@ -302,14 +328,6 @@ impl blockchain::UnresolvedDataSource<Chain> for UnresolvedDataSource {
     }
 }
 
-impl TryFrom<DataSourceTemplateInfo<Chain>> for DataSource {
-    type Error = Error;
-
-    fn try_from(_info: DataSourceTemplateInfo<Chain>) -> Result<Self, Error> {
-        Err(anyhow!(TEMPLATE_ERROR))
-    }
-}
-
 #[derive(Clone, Debug, Default, Hash, Eq, PartialEq, Deserialize)]
 pub struct BaseDataSourceTemplate {
     pub kind: String,
@@ -363,6 +381,8 @@ pub struct UnresolvedMapping {
     pub event_handlers: Vec<MappingEventHandler>,
     #[serde(default)]
     pub transaction_handlers: Vec<MappingTransactionHandler>,
+    #[serde(default)]
+    pub message_handlers: Vec<MappingMessageHandler>,
     pub file: Link,
 }
 
@@ -379,6 +399,7 @@ impl UnresolvedMapping {
             block_handlers,
             event_handlers,
             transaction_handlers,
+            message_handlers,
             file: link,
         } = self;
 
@@ -394,6 +415,7 @@ impl UnresolvedMapping {
             block_handlers: block_handlers.clone(),
             event_handlers: event_handlers.clone(),
             transaction_handlers: transaction_handlers.clone(),
+            message_handlers: message_handlers.clone(),
             runtime: Arc::new(module_bytes),
             link,
         })
@@ -408,6 +430,7 @@ pub struct Mapping {
     pub block_handlers: Vec<MappingBlockHandler>,
     pub event_handlers: Vec<MappingEventHandler>,
     pub transaction_handlers: Vec<MappingTransactionHandler>,
+    pub message_handlers: Vec<MappingMessageHandler>,
     pub runtime: Arc<Vec<u8>>,
     pub link: Link,
 }
@@ -429,6 +452,12 @@ pub struct MappingTransactionHandler {
     pub handler: String,
 }
 
+#[derive(Clone, Debug, Hash, Eq, PartialEq, Deserialize)]
+pub struct MappingMessageHandler {
+    pub message: String,
+    pub handler: String,
+}
+
 #[derive(Clone, Debug, Hash, Eq, PartialEq, Deserialize)]
 pub struct Source {
     #[serde(rename = "startBlock", default)]
@@ -462,6 +491,13 @@ fn combined_origins_err(event_type: &str) -> Error {
     )
 }
 
+fn duplicate_url_type(message: &str) -> Error {
+    anyhow!(
+        "data source has more than one message handler for message {} ",
+        message
+    )
+}
+
 #[cfg(test)]
 mod tests {
     use super::*;
@@ -524,6 +560,54 @@ mod tests {
         }
     }
 
+    #[test]
+    fn test_message_handlers_duplicate() {
+        let cases = [
+            (
+                DataSource::with_message_handlers(vec![
+                    MappingMessageHandler {
+                        handler: "handler".to_string(),
+                        message: "message_0".to_string(),
+                    },
MappingMessageHandler { + handler: "handler".to_string(), + message: "message_1".to_string(), + }, + ]), + vec![], + ), + ( + DataSource::with_message_handlers(vec![ + MappingMessageHandler { + handler: "handler".to_string(), + message: "message_0".to_string(), + }, + MappingMessageHandler { + handler: "handler".to_string(), + message: "message_0".to_string(), + }, + ]), + vec![duplicate_url_type("message_0")], + ), + ]; + + for (data_source, errors) in &cases { + let validation_errors = data_source.validate(); + + assert_eq!(errors.len(), validation_errors.len()); + + for error in errors.iter() { + assert!( + validation_errors + .iter() + .any(|validation_error| validation_error.to_string() == error.to_string()), + r#"expected "{}" to be in validation errors, but it wasn't"#, + error + ); + } + } + } + impl DataSource { fn with_event_handlers(event_handlers: Vec) -> DataSource { DataSource { @@ -538,6 +622,29 @@ mod tests { block_handlers: vec![], event_handlers, transaction_handlers: vec![], + message_handlers: vec![], + runtime: Arc::new(vec![]), + link: "test".to_string().into(), + }, + context: Arc::new(None), + creation_block: None, + } + } + + fn with_message_handlers(message_handlers: Vec) -> DataSource { + DataSource { + kind: "cosmos".to_string(), + network: None, + name: "Test".to_string(), + source: Source { start_block: 1 }, + mapping: Mapping { + api_version: semver::Version::new(0, 0, 0), + language: "".to_string(), + entities: vec![], + block_handlers: vec![], + event_handlers: vec![], + transaction_handlers: vec![], + message_handlers, runtime: Arc::new(vec![]), link: "test".to_string().into(), }, diff --git a/chain/cosmos/src/lib.rs b/chain/cosmos/src/lib.rs index 634cbe01ebd..6d84b61947e 100644 --- a/chain/cosmos/src/lib.rs +++ b/chain/cosmos/src/lib.rs @@ -1,5 +1,4 @@ mod adapter; -mod capabilities; pub mod chain; pub mod codec; mod data_source; diff --git a/chain/cosmos/src/protobuf/sf.cosmos.r#type.v1.rs b/chain/cosmos/src/protobuf/sf.cosmos.r#type.v1.rs index d720b3980e6..d60de8086b1 100644 --- a/chain/cosmos/src/protobuf/sf.cosmos.r#type.v1.rs +++ b/chain/cosmos/src/protobuf/sf.cosmos.r#type.v1.rs @@ -1,21 +1,30 @@ -#[graph_runtime_derive::generate_asc_type(__required__{header: Header,result_begin_block: ResponseBeginBlock,result_end_block: ResponseEndBlock})] -#[graph_runtime_derive::generate_network_type_id(Cosmos)] -#[graph_runtime_derive::generate_from_rust_type(__required__{header: Header,result_begin_block: ResponseBeginBlock,result_end_block: ResponseEndBlock})] +#[graph_runtime_derive::generate_asc_type( + __required__{header:Header, + result_begin_block:ResponseBeginBlock, + result_end_block:ResponseEndBlock} +)] +#[graph_runtime_derive::generate_network_type_id(Cosmos)] +#[graph_runtime_derive::generate_from_rust_type( + __required__{header:Header, + result_begin_block:ResponseBeginBlock, + result_end_block:ResponseEndBlock} +)] +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Block { - #[prost(message, optional, tag="1")] + #[prost(message, optional, tag = "1")] pub header: ::core::option::Option
, - #[prost(message, optional, tag="2")] + #[prost(message, optional, tag = "2")] pub evidence: ::core::option::Option, - #[prost(message, optional, tag="3")] + #[prost(message, optional, tag = "3")] pub last_commit: ::core::option::Option, - #[prost(message, optional, tag="4")] + #[prost(message, optional, tag = "4")] pub result_begin_block: ::core::option::Option, - #[prost(message, optional, tag="5")] + #[prost(message, optional, tag = "5")] pub result_end_block: ::core::option::Option, - #[prost(message, repeated, tag="7")] + #[prost(message, repeated, tag = "7")] pub transactions: ::prost::alloc::vec::Vec, - #[prost(message, repeated, tag="8")] + #[prost(message, repeated, tag = "8")] pub validator_updates: ::prost::alloc::vec::Vec, } /// HeaderOnlyBlock is a standard \[Block\] structure where all other fields are @@ -24,595 +33,711 @@ pub struct Block { /// /// This can be used to unpack a \[Block\] when only the \[Header\] information /// is required and greatly reduce required memory. -#[graph_runtime_derive::generate_asc_type(__required__{header: Header})] +#[graph_runtime_derive::generate_asc_type(__required__{header:Header})] #[graph_runtime_derive::generate_network_type_id(Cosmos)] -#[graph_runtime_derive::generate_from_rust_type(__required__{header: Header})] +#[graph_runtime_derive::generate_from_rust_type(__required__{header:Header})] +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct HeaderOnlyBlock { - #[prost(message, optional, tag="1")] + #[prost(message, optional, tag = "1")] pub header: ::core::option::Option
, } -#[graph_runtime_derive::generate_asc_type(__required__{event: Event,block: HeaderOnlyBlock})] +#[graph_runtime_derive::generate_asc_type( + __required__{event:Event, + block:HeaderOnlyBlock} +)] #[graph_runtime_derive::generate_network_type_id(Cosmos)] -#[graph_runtime_derive::generate_from_rust_type(__required__{event: Event,block: HeaderOnlyBlock})] +#[graph_runtime_derive::generate_from_rust_type( + __required__{event:Event, + block:HeaderOnlyBlock} +)] +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct EventData { - #[prost(message, optional, tag="1")] + #[prost(message, optional, tag = "1")] pub event: ::core::option::Option, - #[prost(message, optional, tag="2")] + #[prost(message, optional, tag = "2")] pub block: ::core::option::Option, + #[prost(message, optional, tag = "3")] + pub tx: ::core::option::Option, } -#[graph_runtime_derive::generate_asc_type(__required__{tx: TxResult,block: HeaderOnlyBlock})] +#[graph_runtime_derive::generate_asc_type( + __required__{tx:TxResult, + block:HeaderOnlyBlock} +)] #[graph_runtime_derive::generate_network_type_id(Cosmos)] -#[graph_runtime_derive::generate_from_rust_type(__required__{tx: TxResult,block: HeaderOnlyBlock})] +#[graph_runtime_derive::generate_from_rust_type( + __required__{tx:TxResult, + block:HeaderOnlyBlock} +)] +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct TransactionData { - #[prost(message, optional, tag="1")] + #[prost(message, optional, tag = "1")] pub tx: ::core::option::Option, - #[prost(message, optional, tag="2")] + #[prost(message, optional, tag = "2")] + pub block: ::core::option::Option, +} +#[graph_runtime_derive::generate_asc_type( + __required__{message:Any, + block:HeaderOnlyBlock, + tx:TransactionContext} +)] +#[graph_runtime_derive::generate_network_type_id(Cosmos)] +#[graph_runtime_derive::generate_from_rust_type( + __required__{message:Any, + block:HeaderOnlyBlock, + tx:TransactionContext} +)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MessageData { + #[prost(message, optional, tag = "1")] + pub message: ::core::option::Option<::prost_types::Any>, + #[prost(message, optional, tag = "2")] pub block: ::core::option::Option, + #[prost(message, optional, tag = "3")] + pub tx: ::core::option::Option, +} +#[graph_runtime_derive::generate_asc_type()] +#[graph_runtime_derive::generate_network_type_id(Cosmos)] +#[graph_runtime_derive::generate_from_rust_type()] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct TransactionContext { + #[prost(bytes = "vec", tag = "1")] + pub hash: ::prost::alloc::vec::Vec, + #[prost(uint32, tag = "2")] + pub index: u32, + #[prost(uint32, tag = "3")] + pub code: u32, + #[prost(int64, tag = "4")] + pub gas_wanted: i64, + #[prost(int64, tag = "5")] + pub gas_used: i64, } -#[graph_runtime_derive::generate_asc_type(__required__{last_block_id: BlockID})] +#[graph_runtime_derive::generate_asc_type(__required__{last_block_id:BlockID})] #[graph_runtime_derive::generate_network_type_id(Cosmos)] -#[graph_runtime_derive::generate_from_rust_type(__required__{last_block_id: BlockID})] +#[graph_runtime_derive::generate_from_rust_type(__required__{last_block_id:BlockID})] +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Header { - #[prost(message, optional, tag="1")] + #[prost(message, optional, tag = "1")] pub 
version: ::core::option::Option, - #[prost(string, tag="2")] + #[prost(string, tag = "2")] pub chain_id: ::prost::alloc::string::String, - #[prost(uint64, tag="3")] + #[prost(uint64, tag = "3")] pub height: u64, - #[prost(message, optional, tag="4")] + #[prost(message, optional, tag = "4")] pub time: ::core::option::Option, - #[prost(message, optional, tag="5")] + #[prost(message, optional, tag = "5")] pub last_block_id: ::core::option::Option, - #[prost(bytes="vec", tag="6")] + #[prost(bytes = "vec", tag = "6")] pub last_commit_hash: ::prost::alloc::vec::Vec, - #[prost(bytes="vec", tag="7")] + #[prost(bytes = "vec", tag = "7")] pub data_hash: ::prost::alloc::vec::Vec, - #[prost(bytes="vec", tag="8")] + #[prost(bytes = "vec", tag = "8")] pub validators_hash: ::prost::alloc::vec::Vec, - #[prost(bytes="vec", tag="9")] + #[prost(bytes = "vec", tag = "9")] pub next_validators_hash: ::prost::alloc::vec::Vec, - #[prost(bytes="vec", tag="10")] + #[prost(bytes = "vec", tag = "10")] pub consensus_hash: ::prost::alloc::vec::Vec, - #[prost(bytes="vec", tag="11")] + #[prost(bytes = "vec", tag = "11")] pub app_hash: ::prost::alloc::vec::Vec, - #[prost(bytes="vec", tag="12")] + #[prost(bytes = "vec", tag = "12")] pub last_results_hash: ::prost::alloc::vec::Vec, - #[prost(bytes="vec", tag="13")] + #[prost(bytes = "vec", tag = "13")] pub evidence_hash: ::prost::alloc::vec::Vec, - #[prost(bytes="vec", tag="14")] + #[prost(bytes = "vec", tag = "14")] pub proposer_address: ::prost::alloc::vec::Vec, - #[prost(bytes="vec", tag="15")] + #[prost(bytes = "vec", tag = "15")] pub hash: ::prost::alloc::vec::Vec, } #[graph_runtime_derive::generate_asc_type()] #[graph_runtime_derive::generate_network_type_id(Cosmos)] #[graph_runtime_derive::generate_from_rust_type()] +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Consensus { - #[prost(uint64, tag="1")] + #[prost(uint64, tag = "1")] pub block: u64, - #[prost(uint64, tag="2")] + #[prost(uint64, tag = "2")] pub app: u64, } #[graph_runtime_derive::generate_asc_type()] #[graph_runtime_derive::generate_network_type_id(Cosmos)] #[graph_runtime_derive::generate_from_rust_type()] +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Timestamp { - #[prost(int64, tag="1")] + #[prost(int64, tag = "1")] pub seconds: i64, - #[prost(int32, tag="2")] + #[prost(int32, tag = "2")] pub nanos: i32, } #[graph_runtime_derive::generate_asc_type()] #[graph_runtime_derive::generate_network_type_id(Cosmos)] #[graph_runtime_derive::generate_from_rust_type()] +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct BlockId { - #[prost(bytes="vec", tag="1")] + #[prost(bytes = "vec", tag = "1")] pub hash: ::prost::alloc::vec::Vec, - #[prost(message, optional, tag="2")] + #[prost(message, optional, tag = "2")] pub part_set_header: ::core::option::Option, } #[graph_runtime_derive::generate_asc_type()] #[graph_runtime_derive::generate_network_type_id(Cosmos)] #[graph_runtime_derive::generate_from_rust_type()] +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct PartSetHeader { - #[prost(uint32, tag="1")] + #[prost(uint32, tag = "1")] pub total: u32, - #[prost(bytes="vec", tag="2")] + #[prost(bytes = "vec", tag = "2")] pub hash: ::prost::alloc::vec::Vec, } #[graph_runtime_derive::generate_asc_type()] #[graph_runtime_derive::generate_network_type_id(Cosmos)] 
#[graph_runtime_derive::generate_from_rust_type()] +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct EvidenceList { - #[prost(message, repeated, tag="1")] + #[prost(message, repeated, tag = "1")] pub evidence: ::prost::alloc::vec::Vec, } -#[graph_runtime_derive::generate_asc_type(sum{duplicate_vote_evidence: DuplicateVoteEvidence,light_client_attack_evidence: LightClientAttackEvidence})] +#[graph_runtime_derive::generate_asc_type( + sum{duplicate_vote_evidence:DuplicateVoteEvidence, + light_client_attack_evidence:LightClientAttackEvidence} +)] #[graph_runtime_derive::generate_network_type_id(Cosmos)] -#[graph_runtime_derive::generate_from_rust_type(sum{duplicate_vote_evidence: DuplicateVoteEvidence,light_client_attack_evidence: LightClientAttackEvidence})] +#[graph_runtime_derive::generate_from_rust_type( + sum{duplicate_vote_evidence:DuplicateVoteEvidence, + light_client_attack_evidence:LightClientAttackEvidence} +)] #[graph_runtime_derive::generate_array_type(Cosmos)] +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Evidence { - #[prost(oneof="evidence::Sum", tags="1, 2")] + #[prost(oneof = "evidence::Sum", tags = "1, 2")] pub sum: ::core::option::Option, } /// Nested message and enum types in `Evidence`. pub mod evidence { + #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum Sum { - #[prost(message, tag="1")] + #[prost(message, tag = "1")] DuplicateVoteEvidence(super::DuplicateVoteEvidence), - #[prost(message, tag="2")] + #[prost(message, tag = "2")] LightClientAttackEvidence(super::LightClientAttackEvidence), } } #[graph_runtime_derive::generate_asc_type()] #[graph_runtime_derive::generate_network_type_id(Cosmos)] #[graph_runtime_derive::generate_from_rust_type()] +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct DuplicateVoteEvidence { - #[prost(message, optional, tag="1")] + #[prost(message, optional, tag = "1")] pub vote_a: ::core::option::Option, - #[prost(message, optional, tag="2")] + #[prost(message, optional, tag = "2")] pub vote_b: ::core::option::Option, - #[prost(int64, tag="3")] + #[prost(int64, tag = "3")] pub total_voting_power: i64, - #[prost(int64, tag="4")] + #[prost(int64, tag = "4")] pub validator_power: i64, - #[prost(message, optional, tag="5")] + #[prost(message, optional, tag = "5")] pub timestamp: ::core::option::Option, } #[graph_runtime_derive::generate_asc_type()] #[graph_runtime_derive::generate_network_type_id(Cosmos)] #[graph_runtime_derive::generate_from_rust_type()] +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct EventVote { - #[prost(enumeration="SignedMsgType", tag="1")] + #[prost(enumeration = "SignedMsgType", tag = "1")] pub event_vote_type: i32, - #[prost(uint64, tag="2")] + #[prost(uint64, tag = "2")] pub height: u64, - #[prost(int32, tag="3")] + #[prost(int32, tag = "3")] pub round: i32, - #[prost(message, optional, tag="4")] + #[prost(message, optional, tag = "4")] pub block_id: ::core::option::Option, - #[prost(message, optional, tag="5")] + #[prost(message, optional, tag = "5")] pub timestamp: ::core::option::Option, - #[prost(bytes="vec", tag="6")] + #[prost(bytes = "vec", tag = "6")] pub validator_address: ::prost::alloc::vec::Vec, - #[prost(int32, tag="7")] + #[prost(int32, tag = "7")] pub validator_index: i32, - #[prost(bytes="vec", tag="8")] + #[prost(bytes = "vec", tag = 
"8")] pub signature: ::prost::alloc::vec::Vec, } #[graph_runtime_derive::generate_asc_type()] #[graph_runtime_derive::generate_network_type_id(Cosmos)] #[graph_runtime_derive::generate_from_rust_type()] +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct LightClientAttackEvidence { - #[prost(message, optional, tag="1")] + #[prost(message, optional, tag = "1")] pub conflicting_block: ::core::option::Option, - #[prost(int64, tag="2")] + #[prost(int64, tag = "2")] pub common_height: i64, - #[prost(message, repeated, tag="3")] + #[prost(message, repeated, tag = "3")] pub byzantine_validators: ::prost::alloc::vec::Vec, - #[prost(int64, tag="4")] + #[prost(int64, tag = "4")] pub total_voting_power: i64, - #[prost(message, optional, tag="5")] + #[prost(message, optional, tag = "5")] pub timestamp: ::core::option::Option, } #[graph_runtime_derive::generate_asc_type()] #[graph_runtime_derive::generate_network_type_id(Cosmos)] #[graph_runtime_derive::generate_from_rust_type()] +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct LightBlock { - #[prost(message, optional, tag="1")] + #[prost(message, optional, tag = "1")] pub signed_header: ::core::option::Option, - #[prost(message, optional, tag="2")] + #[prost(message, optional, tag = "2")] pub validator_set: ::core::option::Option, } #[graph_runtime_derive::generate_asc_type()] #[graph_runtime_derive::generate_network_type_id(Cosmos)] #[graph_runtime_derive::generate_from_rust_type()] +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct SignedHeader { - #[prost(message, optional, tag="1")] + #[prost(message, optional, tag = "1")] pub header: ::core::option::Option
, - #[prost(message, optional, tag="2")] + #[prost(message, optional, tag = "2")] pub commit: ::core::option::Option, } #[graph_runtime_derive::generate_asc_type()] #[graph_runtime_derive::generate_network_type_id(Cosmos)] #[graph_runtime_derive::generate_from_rust_type()] +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Commit { - #[prost(int64, tag="1")] + #[prost(int64, tag = "1")] pub height: i64, - #[prost(int32, tag="2")] + #[prost(int32, tag = "2")] pub round: i32, - #[prost(message, optional, tag="3")] + #[prost(message, optional, tag = "3")] pub block_id: ::core::option::Option, - #[prost(message, repeated, tag="4")] + #[prost(message, repeated, tag = "4")] pub signatures: ::prost::alloc::vec::Vec, } #[graph_runtime_derive::generate_asc_type()] #[graph_runtime_derive::generate_network_type_id(Cosmos)] #[graph_runtime_derive::generate_from_rust_type()] #[graph_runtime_derive::generate_array_type(Cosmos)] +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct CommitSig { - #[prost(enumeration="BlockIdFlag", tag="1")] + #[prost(enumeration = "BlockIdFlag", tag = "1")] pub block_id_flag: i32, - #[prost(bytes="vec", tag="2")] + #[prost(bytes = "vec", tag = "2")] pub validator_address: ::prost::alloc::vec::Vec, - #[prost(message, optional, tag="3")] + #[prost(message, optional, tag = "3")] pub timestamp: ::core::option::Option, - #[prost(bytes="vec", tag="4")] + #[prost(bytes = "vec", tag = "4")] pub signature: ::prost::alloc::vec::Vec, } #[graph_runtime_derive::generate_asc_type()] #[graph_runtime_derive::generate_network_type_id(Cosmos)] #[graph_runtime_derive::generate_from_rust_type()] +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ValidatorSet { - #[prost(message, repeated, tag="1")] + #[prost(message, repeated, tag = "1")] pub validators: ::prost::alloc::vec::Vec, - #[prost(message, optional, tag="2")] + #[prost(message, optional, tag = "2")] pub proposer: ::core::option::Option, - #[prost(int64, tag="3")] + #[prost(int64, tag = "3")] pub total_voting_power: i64, } #[graph_runtime_derive::generate_asc_type()] #[graph_runtime_derive::generate_network_type_id(Cosmos)] #[graph_runtime_derive::generate_from_rust_type()] #[graph_runtime_derive::generate_array_type(Cosmos)] +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Validator { - #[prost(bytes="vec", tag="1")] + #[prost(bytes = "vec", tag = "1")] pub address: ::prost::alloc::vec::Vec, - #[prost(message, optional, tag="2")] + #[prost(message, optional, tag = "2")] pub pub_key: ::core::option::Option, - #[prost(int64, tag="3")] + #[prost(int64, tag = "3")] pub voting_power: i64, - #[prost(int64, tag="4")] + #[prost(int64, tag = "4")] pub proposer_priority: i64, } -#[graph_runtime_derive::generate_asc_type(sum{ed25519: Vec,secp256k1: Vec})] +#[graph_runtime_derive::generate_asc_type(sum{ed25519:Vec, secp256k1:Vec})] #[graph_runtime_derive::generate_network_type_id(Cosmos)] -#[graph_runtime_derive::generate_from_rust_type(sum{ed25519: Vec,secp256k1: Vec})] +#[graph_runtime_derive::generate_from_rust_type(sum{ed25519:Vec, secp256k1:Vec})] +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct PublicKey { - #[prost(oneof="public_key::Sum", tags="1, 2")] + #[prost(oneof = "public_key::Sum", tags = "1, 2")] pub sum: ::core::option::Option, } /// Nested message and enum 
types in `PublicKey`. pub mod public_key { + #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum Sum { - #[prost(bytes, tag="1")] + #[prost(bytes, tag = "1")] Ed25519(::prost::alloc::vec::Vec), - #[prost(bytes, tag="2")] + #[prost(bytes, tag = "2")] Secp256k1(::prost::alloc::vec::Vec), } } #[graph_runtime_derive::generate_asc_type()] #[graph_runtime_derive::generate_network_type_id(Cosmos)] #[graph_runtime_derive::generate_from_rust_type()] +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ResponseBeginBlock { - #[prost(message, repeated, tag="1")] + #[prost(message, repeated, tag = "1")] pub events: ::prost::alloc::vec::Vec, } #[graph_runtime_derive::generate_asc_type()] #[graph_runtime_derive::generate_network_type_id(Cosmos)] #[graph_runtime_derive::generate_from_rust_type()] #[graph_runtime_derive::generate_array_type(Cosmos)] +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Event { - #[prost(string, tag="1")] + #[prost(string, tag = "1")] pub event_type: ::prost::alloc::string::String, - #[prost(message, repeated, tag="2")] + #[prost(message, repeated, tag = "2")] pub attributes: ::prost::alloc::vec::Vec, } #[graph_runtime_derive::generate_asc_type()] #[graph_runtime_derive::generate_network_type_id(Cosmos)] #[graph_runtime_derive::generate_from_rust_type()] #[graph_runtime_derive::generate_array_type(Cosmos)] +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct EventAttribute { - #[prost(string, tag="1")] + #[prost(string, tag = "1")] pub key: ::prost::alloc::string::String, - #[prost(string, tag="2")] + #[prost(string, tag = "2")] pub value: ::prost::alloc::string::String, - #[prost(bool, tag="3")] + #[prost(bool, tag = "3")] pub index: bool, } #[graph_runtime_derive::generate_asc_type()] #[graph_runtime_derive::generate_network_type_id(Cosmos)] #[graph_runtime_derive::generate_from_rust_type()] +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ResponseEndBlock { - #[prost(message, repeated, tag="1")] + #[prost(message, repeated, tag = "1")] pub validator_updates: ::prost::alloc::vec::Vec, - #[prost(message, optional, tag="2")] + #[prost(message, optional, tag = "2")] pub consensus_param_updates: ::core::option::Option, - #[prost(message, repeated, tag="3")] + #[prost(message, repeated, tag = "3")] pub events: ::prost::alloc::vec::Vec, } #[graph_runtime_derive::generate_asc_type()] #[graph_runtime_derive::generate_network_type_id(Cosmos)] #[graph_runtime_derive::generate_from_rust_type()] #[graph_runtime_derive::generate_array_type(Cosmos)] +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ValidatorUpdate { - #[prost(bytes="vec", tag="1")] + #[prost(bytes = "vec", tag = "1")] pub address: ::prost::alloc::vec::Vec, - #[prost(message, optional, tag="2")] + #[prost(message, optional, tag = "2")] pub pub_key: ::core::option::Option, - #[prost(int64, tag="3")] + #[prost(int64, tag = "3")] pub power: i64, } #[graph_runtime_derive::generate_asc_type()] #[graph_runtime_derive::generate_network_type_id(Cosmos)] #[graph_runtime_derive::generate_from_rust_type()] +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ConsensusParams { - #[prost(message, optional, tag="1")] + #[prost(message, optional, tag = "1")] pub block: 
::core::option::Option, - #[prost(message, optional, tag="2")] + #[prost(message, optional, tag = "2")] pub evidence: ::core::option::Option, - #[prost(message, optional, tag="3")] + #[prost(message, optional, tag = "3")] pub validator: ::core::option::Option, - #[prost(message, optional, tag="4")] + #[prost(message, optional, tag = "4")] pub version: ::core::option::Option, } #[graph_runtime_derive::generate_asc_type()] #[graph_runtime_derive::generate_network_type_id(Cosmos)] #[graph_runtime_derive::generate_from_rust_type()] +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct BlockParams { - #[prost(int64, tag="1")] + #[prost(int64, tag = "1")] pub max_bytes: i64, - #[prost(int64, tag="2")] + #[prost(int64, tag = "2")] pub max_gas: i64, } #[graph_runtime_derive::generate_asc_type()] #[graph_runtime_derive::generate_network_type_id(Cosmos)] #[graph_runtime_derive::generate_from_rust_type()] +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct EvidenceParams { - #[prost(int64, tag="1")] + #[prost(int64, tag = "1")] pub max_age_num_blocks: i64, - #[prost(message, optional, tag="2")] + #[prost(message, optional, tag = "2")] pub max_age_duration: ::core::option::Option, - #[prost(int64, tag="3")] + #[prost(int64, tag = "3")] pub max_bytes: i64, } #[graph_runtime_derive::generate_asc_type()] #[graph_runtime_derive::generate_network_type_id(Cosmos)] #[graph_runtime_derive::generate_from_rust_type()] +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Duration { - #[prost(int64, tag="1")] + #[prost(int64, tag = "1")] pub seconds: i64, - #[prost(int32, tag="2")] + #[prost(int32, tag = "2")] pub nanos: i32, } #[graph_runtime_derive::generate_asc_type()] #[graph_runtime_derive::generate_network_type_id(Cosmos)] #[graph_runtime_derive::generate_from_rust_type()] +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ValidatorParams { - #[prost(string, repeated, tag="1")] + #[prost(string, repeated, tag = "1")] pub pub_key_types: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, } #[graph_runtime_derive::generate_asc_type()] #[graph_runtime_derive::generate_network_type_id(Cosmos)] #[graph_runtime_derive::generate_from_rust_type()] +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct VersionParams { - #[prost(uint64, tag="1")] + #[prost(uint64, tag = "1")] pub app_version: u64, } -#[graph_runtime_derive::generate_asc_type(__required__{tx: Tx,result: ResponseDeliverTx})] +#[graph_runtime_derive::generate_asc_type(__required__{tx:Tx, result:ResponseDeliverTx})] #[graph_runtime_derive::generate_network_type_id(Cosmos)] -#[graph_runtime_derive::generate_from_rust_type(__required__{tx: Tx,result: ResponseDeliverTx})] +#[graph_runtime_derive::generate_from_rust_type( + __required__{tx:Tx, + result:ResponseDeliverTx} +)] #[graph_runtime_derive::generate_array_type(Cosmos)] +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct TxResult { - #[prost(uint64, tag="1")] + #[prost(uint64, tag = "1")] pub height: u64, - #[prost(uint32, tag="2")] + #[prost(uint32, tag = "2")] pub index: u32, - #[prost(message, optional, tag="3")] + #[prost(message, optional, tag = "3")] pub tx: ::core::option::Option, - #[prost(message, optional, tag="4")] + #[prost(message, optional, tag = "4")] pub result: 
::core::option::Option, - #[prost(bytes="vec", tag="5")] + #[prost(bytes = "vec", tag = "5")] pub hash: ::prost::alloc::vec::Vec, } -#[graph_runtime_derive::generate_asc_type(__required__{body: TxBody})] +#[graph_runtime_derive::generate_asc_type(__required__{body:TxBody})] #[graph_runtime_derive::generate_network_type_id(Cosmos)] -#[graph_runtime_derive::generate_from_rust_type(__required__{body: TxBody})] +#[graph_runtime_derive::generate_from_rust_type(__required__{body:TxBody})] +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Tx { - #[prost(message, optional, tag="1")] + #[prost(message, optional, tag = "1")] pub body: ::core::option::Option, - #[prost(message, optional, tag="2")] + #[prost(message, optional, tag = "2")] pub auth_info: ::core::option::Option, - #[prost(bytes="vec", repeated, tag="3")] + #[prost(bytes = "vec", repeated, tag = "3")] pub signatures: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec>, } #[graph_runtime_derive::generate_asc_type()] #[graph_runtime_derive::generate_network_type_id(Cosmos)] #[graph_runtime_derive::generate_from_rust_type()] +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct TxBody { - #[prost(message, repeated, tag="1")] + #[prost(message, repeated, tag = "1")] pub messages: ::prost::alloc::vec::Vec<::prost_types::Any>, - #[prost(string, tag="2")] + #[prost(string, tag = "2")] pub memo: ::prost::alloc::string::String, - #[prost(uint64, tag="3")] + #[prost(uint64, tag = "3")] pub timeout_height: u64, - #[prost(message, repeated, tag="1023")] + #[prost(message, repeated, tag = "1023")] pub extension_options: ::prost::alloc::vec::Vec<::prost_types::Any>, - #[prost(message, repeated, tag="2047")] + #[prost(message, repeated, tag = "2047")] pub non_critical_extension_options: ::prost::alloc::vec::Vec<::prost_types::Any>, } #[graph_runtime_derive::generate_asc_type()] #[graph_runtime_derive::generate_network_type_id(Cosmos)] #[graph_runtime_derive::generate_from_rust_type()] #[graph_runtime_derive::generate_array_type(Cosmos)] +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Any { - #[prost(string, tag="1")] + #[prost(string, tag = "1")] pub type_url: ::prost::alloc::string::String, - #[prost(bytes="vec", tag="2")] + #[prost(bytes = "vec", tag = "2")] pub value: ::prost::alloc::vec::Vec, } #[graph_runtime_derive::generate_asc_type()] #[graph_runtime_derive::generate_network_type_id(Cosmos)] #[graph_runtime_derive::generate_from_rust_type()] +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct AuthInfo { - #[prost(message, repeated, tag="1")] + #[prost(message, repeated, tag = "1")] pub signer_infos: ::prost::alloc::vec::Vec, - #[prost(message, optional, tag="2")] + #[prost(message, optional, tag = "2")] pub fee: ::core::option::Option, - #[prost(message, optional, tag="3")] + #[prost(message, optional, tag = "3")] pub tip: ::core::option::Option, } #[graph_runtime_derive::generate_asc_type()] #[graph_runtime_derive::generate_network_type_id(Cosmos)] #[graph_runtime_derive::generate_from_rust_type()] #[graph_runtime_derive::generate_array_type(Cosmos)] +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct SignerInfo { - #[prost(message, optional, tag="1")] + #[prost(message, optional, tag = "1")] pub public_key: ::core::option::Option<::prost_types::Any>, - #[prost(message, optional, 
tag="2")] + #[prost(message, optional, tag = "2")] pub mode_info: ::core::option::Option, - #[prost(uint64, tag="3")] + #[prost(uint64, tag = "3")] pub sequence: u64, } -#[graph_runtime_derive::generate_asc_type(sum{single: ModeInfoSingle,multi: ModeInfoMulti})] +#[graph_runtime_derive::generate_asc_type( + sum{single:ModeInfoSingle, + multi:ModeInfoMulti} +)] #[graph_runtime_derive::generate_network_type_id(Cosmos)] -#[graph_runtime_derive::generate_from_rust_type(sum{single: ModeInfoSingle,multi: ModeInfoMulti})] +#[graph_runtime_derive::generate_from_rust_type( + sum{single:ModeInfoSingle, + multi:ModeInfoMulti} +)] #[graph_runtime_derive::generate_array_type(Cosmos)] +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ModeInfo { - #[prost(oneof="mode_info::Sum", tags="1, 2")] + #[prost(oneof = "mode_info::Sum", tags = "1, 2")] pub sum: ::core::option::Option, } /// Nested message and enum types in `ModeInfo`. pub mod mode_info { + #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum Sum { - #[prost(message, tag="1")] + #[prost(message, tag = "1")] Single(super::ModeInfoSingle), - #[prost(message, tag="2")] + #[prost(message, tag = "2")] Multi(super::ModeInfoMulti), } } #[graph_runtime_derive::generate_asc_type()] #[graph_runtime_derive::generate_network_type_id(Cosmos)] #[graph_runtime_derive::generate_from_rust_type()] +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ModeInfoSingle { - #[prost(enumeration="SignMode", tag="1")] + #[prost(enumeration = "SignMode", tag = "1")] pub mode: i32, } #[graph_runtime_derive::generate_asc_type()] #[graph_runtime_derive::generate_network_type_id(Cosmos)] #[graph_runtime_derive::generate_from_rust_type()] +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ModeInfoMulti { - #[prost(message, optional, tag="1")] + #[prost(message, optional, tag = "1")] pub bitarray: ::core::option::Option, - #[prost(message, repeated, tag="2")] + #[prost(message, repeated, tag = "2")] pub mode_infos: ::prost::alloc::vec::Vec, } #[graph_runtime_derive::generate_asc_type()] #[graph_runtime_derive::generate_network_type_id(Cosmos)] #[graph_runtime_derive::generate_from_rust_type()] +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct CompactBitArray { - #[prost(uint32, tag="1")] + #[prost(uint32, tag = "1")] pub extra_bits_stored: u32, - #[prost(bytes="vec", tag="2")] + #[prost(bytes = "vec", tag = "2")] pub elems: ::prost::alloc::vec::Vec, } #[graph_runtime_derive::generate_asc_type()] #[graph_runtime_derive::generate_network_type_id(Cosmos)] #[graph_runtime_derive::generate_from_rust_type()] +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Fee { - #[prost(message, repeated, tag="1")] + #[prost(message, repeated, tag = "1")] pub amount: ::prost::alloc::vec::Vec, - #[prost(uint64, tag="2")] + #[prost(uint64, tag = "2")] pub gas_limit: u64, - #[prost(string, tag="3")] + #[prost(string, tag = "3")] pub payer: ::prost::alloc::string::String, - #[prost(string, tag="4")] + #[prost(string, tag = "4")] pub granter: ::prost::alloc::string::String, } #[graph_runtime_derive::generate_asc_type()] #[graph_runtime_derive::generate_network_type_id(Cosmos)] #[graph_runtime_derive::generate_from_rust_type()] #[graph_runtime_derive::generate_array_type(Cosmos)] 
+#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Coin { - #[prost(string, tag="1")] + #[prost(string, tag = "1")] pub denom: ::prost::alloc::string::String, - #[prost(string, tag="2")] + #[prost(string, tag = "2")] pub amount: ::prost::alloc::string::String, } #[graph_runtime_derive::generate_asc_type()] #[graph_runtime_derive::generate_network_type_id(Cosmos)] #[graph_runtime_derive::generate_from_rust_type()] +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Tip { - #[prost(message, repeated, tag="1")] + #[prost(message, repeated, tag = "1")] pub amount: ::prost::alloc::vec::Vec, - #[prost(string, tag="2")] + #[prost(string, tag = "2")] pub tipper: ::prost::alloc::string::String, } #[graph_runtime_derive::generate_asc_type()] #[graph_runtime_derive::generate_network_type_id(Cosmos)] #[graph_runtime_derive::generate_from_rust_type()] +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ResponseDeliverTx { - #[prost(uint32, tag="1")] + #[prost(uint32, tag = "1")] pub code: u32, - #[prost(bytes="vec", tag="2")] + #[prost(bytes = "vec", tag = "2")] pub data: ::prost::alloc::vec::Vec, - #[prost(string, tag="3")] + #[prost(string, tag = "3")] pub log: ::prost::alloc::string::String, - #[prost(string, tag="4")] + #[prost(string, tag = "4")] pub info: ::prost::alloc::string::String, - #[prost(int64, tag="5")] + #[prost(int64, tag = "5")] pub gas_wanted: i64, - #[prost(int64, tag="6")] + #[prost(int64, tag = "6")] pub gas_used: i64, - #[prost(message, repeated, tag="7")] + #[prost(message, repeated, tag = "7")] pub events: ::prost::alloc::vec::Vec, - #[prost(string, tag="8")] + #[prost(string, tag = "8")] pub codespace: ::prost::alloc::string::String, } #[graph_runtime_derive::generate_asc_type()] #[graph_runtime_derive::generate_network_type_id(Cosmos)] #[graph_runtime_derive::generate_from_rust_type()] +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ValidatorSetUpdates { - #[prost(message, repeated, tag="1")] + #[prost(message, repeated, tag = "1")] pub validator_updates: ::prost::alloc::vec::Vec, } #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] @@ -623,6 +748,30 @@ pub enum SignedMsgType { Precommit = 2, Proposal = 32, } +impl SignedMsgType { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + SignedMsgType::Unknown => "SIGNED_MSG_TYPE_UNKNOWN", + SignedMsgType::Prevote => "SIGNED_MSG_TYPE_PREVOTE", + SignedMsgType::Precommit => "SIGNED_MSG_TYPE_PRECOMMIT", + SignedMsgType::Proposal => "SIGNED_MSG_TYPE_PROPOSAL", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. 
+    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
+        match value {
+            "SIGNED_MSG_TYPE_UNKNOWN" => Some(Self::Unknown),
+            "SIGNED_MSG_TYPE_PREVOTE" => Some(Self::Prevote),
+            "SIGNED_MSG_TYPE_PRECOMMIT" => Some(Self::Precommit),
+            "SIGNED_MSG_TYPE_PROPOSAL" => Some(Self::Proposal),
+            _ => None,
+        }
+    }
+}
 #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
 #[repr(i32)]
 pub enum BlockIdFlag {
@@ -631,6 +780,30 @@ pub enum BlockIdFlag {
     Commit = 2,
     Nil = 3,
 }
+impl BlockIdFlag {
+    /// String value of the enum field names used in the ProtoBuf definition.
+    ///
+    /// The values are not transformed in any way and thus are considered stable
+    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
+    pub fn as_str_name(&self) -> &'static str {
+        match self {
+            BlockIdFlag::Unknown => "BLOCK_ID_FLAG_UNKNOWN",
+            BlockIdFlag::Absent => "BLOCK_ID_FLAG_ABSENT",
+            BlockIdFlag::Commit => "BLOCK_ID_FLAG_COMMIT",
+            BlockIdFlag::Nil => "BLOCK_ID_FLAG_NIL",
+        }
+    }
+    /// Creates an enum from field names used in the ProtoBuf definition.
+    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
+        match value {
+            "BLOCK_ID_FLAG_UNKNOWN" => Some(Self::Unknown),
+            "BLOCK_ID_FLAG_ABSENT" => Some(Self::Absent),
+            "BLOCK_ID_FLAG_COMMIT" => Some(Self::Commit),
+            "BLOCK_ID_FLAG_NIL" => Some(Self::Nil),
+            _ => None,
+        }
+    }
+}
 #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
 #[repr(i32)]
 pub enum SignMode {
@@ -639,3 +812,27 @@ pub enum SignMode {
     Textual = 2,
     LegacyAminoJson = 127,
 }
+impl SignMode {
+    /// String value of the enum field names used in the ProtoBuf definition.
+    ///
+    /// The values are not transformed in any way and thus are considered stable
+    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
+    pub fn as_str_name(&self) -> &'static str {
+        match self {
+            SignMode::Unspecified => "SIGN_MODE_UNSPECIFIED",
+            SignMode::Direct => "SIGN_MODE_DIRECT",
+            SignMode::Textual => "SIGN_MODE_TEXTUAL",
+            SignMode::LegacyAminoJson => "SIGN_MODE_LEGACY_AMINO_JSON",
+        }
+    }
+    /// Creates an enum from field names used in the ProtoBuf definition.
+    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
+        match value {
+            "SIGN_MODE_UNSPECIFIED" => Some(Self::Unspecified),
+            "SIGN_MODE_DIRECT" => Some(Self::Direct),
+            "SIGN_MODE_TEXTUAL" => Some(Self::Textual),
+            "SIGN_MODE_LEGACY_AMINO_JSON" => Some(Self::LegacyAminoJson),
+            _ => None,
+        }
+    }
+}
diff --git a/chain/cosmos/src/runtime/mod.rs b/chain/cosmos/src/runtime/mod.rs
index 17a8ec8f75e..77702f3ba90 100644
--- a/chain/cosmos/src/runtime/mod.rs
+++ b/chain/cosmos/src/runtime/mod.rs
@@ -64,6 +64,7 @@ mod test {
         assert_asc_bytes!(AscEventData {
             event: new_asc_ptr(),
             block: new_asc_ptr(),
+            tx: new_asc_ptr(),
         });

         assert_asc_bytes!(AscTransactionData {
@@ -71,6 +72,20 @@
             block: new_asc_ptr(),
         });

+        assert_asc_bytes!(AscMessageData {
+            message: new_asc_ptr(),
+            block: new_asc_ptr(),
+            tx: new_asc_ptr(),
+        });
+
+        assert_asc_bytes!(AscTransactionContext {
+            hash: new_asc_ptr(),
+            index: 20,
+            code: 20,
+            gas_wanted: 20,
+            gas_used: 20,
+        });
+
         assert_asc_bytes!(AscHeader {
             version: new_asc_ptr(),
             chain_id: new_asc_ptr(),
diff --git a/chain/cosmos/src/trigger.rs b/chain/cosmos/src/trigger.rs
index 35880c52e62..52a64e4b0f2 100644
--- a/chain/cosmos/src/trigger.rs
+++ b/chain/cosmos/src/trigger.rs
@@ -20,6 +20,7 @@ impl std::fmt::Debug for CosmosTrigger {
             origin: EventOrigin,
         },
         Transaction,
+        Message,
     }

     let trigger_without_block = match self {
@@ -29,6 +30,7 @@
             origin: *origin,
         },
         CosmosTrigger::Transaction(_) => MappingTriggerWithoutBlock::Transaction,
+        CosmosTrigger::Message(_) => MappingTriggerWithoutBlock::Message,
     };

     write!(f, "{:?}", trigger_without_block)
@@ -49,6 +51,9 @@ impl ToAscPtr for CosmosTrigger {
         CosmosTrigger::Transaction(transaction_data) => {
             asc_new(heap, transaction_data.as_ref(), gas)?.erase()
         }
+        CosmosTrigger::Message(message_data) => {
+            asc_new(heap, message_data.as_ref(), gas)?.erase()
+        }
     })
 }
 }
@@ -61,6 +66,7 @@ pub enum CosmosTrigger {
         origin: EventOrigin,
     },
     Transaction(Arc<codec::TransactionData>),
+    Message(Arc<codec::MessageData>),
 }

 impl CheapClone for CosmosTrigger {
@@ -74,6 +80,9 @@
         CosmosTrigger::Transaction(transaction_data) => {
             CosmosTrigger::Transaction(transaction_data.cheap_clone())
         }
+        CosmosTrigger::Message(message_data) => {
+            CosmosTrigger::Message(message_data.cheap_clone())
+        }
     }
 }
 }
@@ -99,6 +108,7 @@ impl PartialEq for CosmosTrigger {
             }
         }
         (Self::Transaction(a_ptr), Self::Transaction(b_ptr)) => a_ptr == b_ptr,
+        (Self::Message(a_ptr), Self::Message(b_ptr)) => a_ptr == b_ptr,
         _ => false,
     }
 }
 }
@@ -110,12 +120,14 @@ impl CosmosTrigger {
     pub(crate) fn with_event(
         event: codec::Event,
         block: codec::HeaderOnlyBlock,
+        tx_context: Option<codec::TransactionContext>,
         origin: EventOrigin,
     ) -> CosmosTrigger {
         CosmosTrigger::Event {
             event_data: Arc::new(codec::EventData {
                 event: Some(event),
                 block: Some(block),
+                tx: tx_context,
             }),
             origin,
         }
@@ -131,6 +143,18 @@
         }))
     }

+    pub(crate) fn with_message(
+        message: ::prost_types::Any,
+        block: codec::HeaderOnlyBlock,
+        tx_context: codec::TransactionContext,
+    ) -> CosmosTrigger {
+        CosmosTrigger::Message(Arc::new(codec::MessageData {
+            message: Some(message),
+            block: Some(block),
+            tx: Some(tx_context),
+        }))
+    }
+
     pub fn block_number(&self) -> Result<BlockNumber, Error> {
         match self {
             CosmosTrigger::Block(block) => Ok(block.number()),
@@ -138,6 +162,7 @@
             CosmosTrigger::Transaction(transaction_data) => {
                 transaction_data.block().map(|b| b.number())
             }
+            CosmosTrigger::Message(message_data) => message_data.block().map(|b| b.number()),
         }
     }

@@ -148,6 +173,7 @@ impl CosmosTrigger {
             CosmosTrigger::Transaction(transaction_data) => {
                 transaction_data.block().map(|b| b.hash())
             }
+            CosmosTrigger::Message(message_data) => message_data.block().map(|b| b.hash()),
         }
     }
 }
@@ -155,17 +181,13 @@
 impl Ord for CosmosTrigger {
     fn cmp(&self, other: &Self) -> Ordering {
         match (self, other) {
-            // Keep the order when comparing two block triggers
-            (Self::Block(..), Self::Block(..)) => Ordering::Equal,
-
-            // Block triggers always come last
-            (Self::Block(..), _) => Ordering::Greater,
-            (_, Self::Block(..)) => Ordering::Less,
-
             // Events have no intrinsic ordering information, so we keep the order in
             // which they are included in the `events` field
             (Self::Event { .. }, Self::Event { .. }) => Ordering::Equal,

+            // Keep the order when comparing two message triggers
+            (Self::Message(..), Self::Message(..)) => Ordering::Equal,
+
             // Transactions are ordered by their index inside the block
             (Self::Transaction(a), Self::Transaction(b)) => {
                 if let (Ok(a_tx_result), Ok(b_tx_result)) = (a.tx_result(), b.tx_result()) {
@@ -175,9 +197,20 @@ impl Ord for CosmosTrigger {
                 }
             }

-            // When comparing events and transactions, transactions go first
-            (Self::Transaction(..), Self::Event { .. }) => Ordering::Less,
-            (Self::Event { .. }, Self::Transaction(..)) => Ordering::Greater,
+            // Keep the order when comparing two block triggers
+            (Self::Block(..), Self::Block(..)) => Ordering::Equal,
+
+            // Event triggers always come first
+            (Self::Event { .. }, _) => Ordering::Greater,
+            (_, Self::Event { .. }) => Ordering::Less,
+
+            // Block triggers always come last
+            (Self::Block(..), _) => Ordering::Less,
+            (_, Self::Block(..)) => Ordering::Greater,
+
+            // Message triggers before Transaction triggers
+            (Self::Message(..), Self::Transaction(..)) => Ordering::Greater,
+            (Self::Transaction(..), Self::Message(..)) => Ordering::Less,
         }
     }
 }
@@ -208,7 +241,7 @@ impl TriggerData for CosmosTrigger {
                     event.event_type, origin,
                 )
             } else {
-                "event in block".to_string()
+                "event".to_string()
             }
         }
         CosmosTrigger::Transaction(transaction_data) => {
@@ -222,9 +255,105 @@
                     response_deliver_tx.log
                 )
             } else {
-                "transaction block".to_string()
+                "transaction".to_string()
+            }
+        }
+        CosmosTrigger::Message(message_data) => {
+            if let (Ok(message), Ok(block_number), Ok(block_hash)) = (
+                message_data.message(),
+                self.block_number(),
+                self.block_hash(),
+            ) {
+                format!(
+                    "message type {}, block #{block_number}, hash {block_hash}",
+                    message.type_url,
+                )
+            } else {
+                "message".to_string()
+            }
+        }
     }
 }
 }
+
+#[cfg(test)]
+mod tests {
+    use crate::codec::TxResult;
+
+    use super::*;
+
+    #[test]
+    fn test_cosmos_trigger_ordering() {
+        let event_trigger = CosmosTrigger::Event {
+            event_data: Arc::<codec::EventData>::new(codec::EventData {
+                ..Default::default()
+            }),
+            origin: EventOrigin::BeginBlock,
+        };
+        let other_event_trigger = CosmosTrigger::Event {
+            event_data: Arc::<codec::EventData>::new(codec::EventData {
+                ..Default::default()
+            }),
+            origin: EventOrigin::BeginBlock,
+        };
+        let message_trigger =
+            CosmosTrigger::Message(Arc::<codec::MessageData>::new(codec::MessageData {
+                ..Default::default()
+            }));
+        let other_message_trigger =
+            CosmosTrigger::Message(Arc::<codec::MessageData>::new(codec::MessageData {
+                ..Default::default()
+            }));
+        let transaction_trigger = CosmosTrigger::Transaction(Arc::<codec::TransactionData>::new(
+            codec::TransactionData {
+                block: None,
+                tx: Some(TxResult {
+                    index: 1,
+                    ..Default::default()
+                }),
+            },
+        ));
+        let other_transaction_trigger = CosmosTrigger::Transaction(
+            Arc::<codec::TransactionData>::new(codec::TransactionData {
+                block: None,
+                tx:
Some(TxResult { + index: 2, + ..Default::default() + }), + }), + ); + let block_trigger = CosmosTrigger::Block(Arc::::new(codec::Block { + ..Default::default() + })); + let other_block_trigger = CosmosTrigger::Block(Arc::::new(codec::Block { + ..Default::default() + })); + + assert_eq!(event_trigger.cmp(&block_trigger), Ordering::Greater); + assert_eq!(event_trigger.cmp(&transaction_trigger), Ordering::Greater); + assert_eq!(event_trigger.cmp(&message_trigger), Ordering::Greater); + assert_eq!(event_trigger.cmp(&other_event_trigger), Ordering::Equal); + + assert_eq!(message_trigger.cmp(&block_trigger), Ordering::Greater); + assert_eq!(message_trigger.cmp(&transaction_trigger), Ordering::Greater); + assert_eq!(message_trigger.cmp(&other_message_trigger), Ordering::Equal); + assert_eq!(message_trigger.cmp(&event_trigger), Ordering::Less); + + assert_eq!(transaction_trigger.cmp(&block_trigger), Ordering::Greater); + assert_eq!( + transaction_trigger.cmp(&other_transaction_trigger), + Ordering::Less + ); + assert_eq!( + other_transaction_trigger.cmp(&transaction_trigger), + Ordering::Greater + ); + assert_eq!(transaction_trigger.cmp(&message_trigger), Ordering::Less); + assert_eq!(transaction_trigger.cmp(&event_trigger), Ordering::Less); + + assert_eq!(block_trigger.cmp(&other_block_trigger), Ordering::Equal); + assert_eq!(block_trigger.cmp(&transaction_trigger), Ordering::Less); + assert_eq!(block_trigger.cmp(&message_trigger), Ordering::Less); + assert_eq!(block_trigger.cmp(&event_trigger), Ordering::Less); + } +} diff --git a/chain/ethereum/Cargo.toml b/chain/ethereum/Cargo.toml index b3143cd1f89..4a5a1180dcd 100644 --- a/chain/ethereum/Cargo.toml +++ b/chain/ethereum/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "graph-chain-ethereum" -version = "0.27.0" -edition = "2021" +version.workspace = true +edition.workspace = true [dependencies] envconfig = "0.10.0" @@ -11,22 +11,23 @@ jsonrpc-core = "18.0.0" graph = { path = "../../graph" } lazy_static = "1.2.0" serde = "1.0" -prost = "0.10.4" -prost-types = "0.10.1" +prost = { workspace = true } +prost-types = { workspace = true } dirs-next = "2.0" anyhow = "1.0" tiny-keccak = "1.5.0" hex = "0.4.3" -semver = "1.0.12" +semver = "1.0.16" -itertools = "0.10.3" +itertools = "0.10.5" graph-runtime-wasm = { path = "../../runtime/wasm" } graph-runtime-derive = { path = "../../runtime/derive" } [dev-dependencies] test-store = { path = "../../store/test-store" } -base64 = "0.13.0" +base64 = "0.20.0" +graph-mock = { path = "../../mock" } [build-dependencies] -tonic-build = { version = "0.7.2", features = ["prost"] } +tonic-build = { workspace = true } diff --git a/chain/ethereum/examples/firehose.rs b/chain/ethereum/examples/firehose.rs index d2088b2167a..37acba642e3 100644 --- a/chain/ethereum/examples/firehose.rs +++ b/chain/ethereum/examples/firehose.rs @@ -1,8 +1,9 @@ use anyhow::Error; use graph::{ env::env_var, + firehose::SubgraphLimit, prelude::{prost, tokio, tonic}, - {firehose, firehose::FirehoseEndpoint, firehose::ForkStep}, + {firehose, firehose::FirehoseEndpoint}, }; use graph_chain_ethereum::codec; use hex::ToHex; @@ -15,7 +16,7 @@ async fn main() -> Result<(), Error> { let mut cursor: Option = None; let token_env = env_var("SF_API_TOKEN", "".to_string()); let mut token: Option = None; - if token_env.len() > 0 { + if !token_env.is_empty() { token = Some(token_env); } @@ -25,7 +26,7 @@ async fn main() -> Result<(), Error> { token, false, false, - 1, + SubgraphLimit::Unlimited, )); loop { @@ -35,11 +36,11 @@ async fn main() -> Result<(), Error> 
{ .stream_blocks(firehose::Request { start_block_num: 12369739, stop_block_num: 12369739, - start_cursor: match &cursor { + cursor: match &cursor { Some(c) => c.clone(), None => String::from(""), }, - fork_steps: vec![ForkStep::StepNew as i32, ForkStep::StepUndo as i32], + final_blocks_only: false, ..Default::default() }) .await @@ -87,7 +88,7 @@ async fn main() -> Result<(), Error> { }) }); - if logs.len() > 0 { + if !logs.is_empty() { println!("Transaction {}", trx.hash.encode_hex::()); logs.iter().for_each(|log| println!("{}", log)); } diff --git a/chain/ethereum/src/adapter.rs b/chain/ethereum/src/adapter.rs index 16b3035ae8a..fc2253ceaba 100644 --- a/chain/ethereum/src/adapter.rs +++ b/chain/ethereum/src/adapter.rs @@ -44,7 +44,7 @@ pub struct EthereumContractCall { #[derive(Error, Debug)] pub enum EthereumContractCallError { #[error("ABI error: {0}")] - ABIError(ABIError), + ABIError(#[from] ABIError), /// `Token` is not of expected `ParamType` #[error("type mismatch, token {0:?} is not of kind {1:?}")] TypeError(Token, ParamType), @@ -58,12 +58,6 @@ pub enum EthereumContractCallError { Timeout, } -impl From for EthereumContractCallError { - fn from(e: ABIError) -> Self { - EthereumContractCallError::ABIError(e) - } -} - #[derive(Copy, Clone, Debug, PartialEq, Eq, Ord, PartialOrd, Hash)] enum LogFilterNode { Contract(Address), @@ -149,7 +143,7 @@ impl bc::TriggerFilter for TriggerFilter { &mut self, data_sources: impl Iterator::DataSourceTemplate>, ) { - for data_source in data_sources.into_iter() { + for data_source in data_sources { self.log .extend(EthereumLogFilter::from_mapping(&data_source.mapping)); @@ -167,21 +161,18 @@ impl bc::TriggerFilter for TriggerFilter { trigger_every_block, } = self.block.clone(); - if trigger_every_block { - return Vec::new(); - } - let log_filters: Vec = self.log.into(); let mut call_filters: Vec = self.call.into(); call_filters.extend(Into::>::into(self.block)); - if call_filters.is_empty() && log_filters.is_empty() { + if call_filters.is_empty() && log_filters.is_empty() && !trigger_every_block { return Vec::new(); } let combined_filter = CombinedFilter { log_filters, call_filters, + send_all_block_headers: trigger_every_block, }; vec![Any { @@ -358,7 +349,7 @@ impl EthereumLogFilter { // Sanity checks: // - The filter is not a wildcard because all nodes have neighbors. // - The graph is bipartite. 
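The `#[from]` attribute added to `ABIError` above lets thiserror generate the conversion that the deleted hand-written `From` impl used to provide. A minimal sketch of the idiom, with a stand-in `AbiError` type (the real variant wraps ethabi's error, and the `Timeout` message here is illustrative):

```rust
use thiserror::Error;

// Stand-in for ethabi's error type.
#[derive(Error, Debug)]
#[error("{0}")]
pub struct AbiError(pub String);

#[derive(Error, Debug)]
pub enum ContractCallError {
    // `#[from]` generates the `From<AbiError>` impl that used to be written
    // out by hand, so `?` and `.into()` keep working unchanged.
    #[error("ABI error: {0}")]
    Abi(#[from] AbiError),
    #[error("call timed out")]
    Timeout,
}

fn main() {
    let err: ContractCallError = AbiError("bad selector".to_string()).into();
    assert_eq!(err.to_string(), "ABI error: bad selector");
}
```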
- assert!(filter.contracts.len() > 0 && filter.event_signatures.len() > 0); + assert!(!filter.contracts.is_empty() && !filter.event_signatures.is_empty()); assert!(filter.contracts.len() == 1 || filter.event_signatures.len() == 1); filters.push(filter); }; @@ -489,7 +480,7 @@ impl EthereumCallFilter { pub fn from_data_sources<'a>(iter: impl IntoIterator) -> Self { iter.into_iter() .filter_map(|data_source| data_source.address.map(|addr| (addr, data_source))) - .map(|(contract_addr, data_source)| { + .flat_map(|(contract_addr, data_source)| { let start_block = data_source.start_block; data_source .mapping @@ -500,7 +491,6 @@ impl EthereumCallFilter { (start_block, contract_addr, [sig[0], sig[1], sig[2], sig[3]]) }) }) - .flatten() .collect() } @@ -557,9 +547,9 @@ impl FromIterator<(BlockNumber, Address, FunctionSelector)> for EthereumCallFilt let mut lookup: HashMap)> = HashMap::new(); iter.into_iter() .for_each(|(start_block, address, function_signature)| { - if !lookup.contains_key(&address) { - lookup.insert(address, (start_block, HashSet::default())); - } + lookup + .entry(address) + .or_insert((start_block, HashSet::default())); lookup.get_mut(&address).map(|set| { if set.0 > start_block { set.0 = start_block @@ -582,7 +572,7 @@ impl From<&EthereumBlockFilter> for EthereumCallFilter { .contract_addresses .iter() .map(|(start_block_opt, address)| { - (address.clone(), (*start_block_opt, HashSet::default())) + (*address, (*start_block_opt, HashSet::default())) }) .collect::)>>(), wildcard_signatures: HashSet::new(), @@ -619,7 +609,7 @@ impl EthereumBlockFilter { pub fn from_mapping(mapping: &Mapping) -> Self { Self { contract_addresses: HashSet::new(), - trigger_every_block: mapping.block_handlers.len() != 0, + trigger_every_block: !mapping.block_handlers.is_empty(), } } @@ -1126,7 +1116,7 @@ mod tests { ); let mut combined_filter = &firehose_filter - .get(COMBINED_FILTER_TYPE_URL.into()) + .get(COMBINED_FILTER_TYPE_URL) .expect("a CombinedFilter") .value[..]; @@ -1136,6 +1126,7 @@ mod tests { let CombinedFilter { log_filters: mut actual_log_filters, call_filters: mut actual_call_filters, + send_all_block_headers: actual_send_all_block_headers, } = combined_filter; actual_call_filters.sort_by(|a, b| a.addresses.cmp(&b.addresses)); @@ -1144,15 +1135,77 @@ mod tests { } assert_eq!(expected_call_filters, actual_call_filters); + actual_log_filters.sort_by(|a, b| a.addresses.cmp(&b.addresses)); + for filter in actual_log_filters.iter_mut() { + filter.event_signatures.sort(); + } + assert_eq!(expected_log_filters, actual_log_filters); + assert_eq!(false, actual_send_all_block_headers); + } + + #[test] + fn ethereum_trigger_filter_to_firehose_every_block_plus_logfilter() { + let address = Address::from_low_u64_be; + let sig = H256::from_low_u64_le; + let mut filter = TriggerFilter { + log: EthereumLogFilter { + contracts_and_events_graph: GraphMap::new(), + wildcard_events: HashMap::new(), + }, + call: EthereumCallFilter { + contract_addresses_function_signatures: HashMap::new(), + wildcard_signatures: HashSet::new(), + }, + block: EthereumBlockFilter { + contract_addresses: HashSet::new(), + trigger_every_block: true, + }, + }; + + filter.log.contracts_and_events_graph.add_edge( + LogFilterNode::Contract(address(10)), + LogFilterNode::Event(sig(101)), + false, + ); + + let expected_log_filters = vec![LogFilter { + addresses: vec![address(10).to_fixed_bytes().to_vec()], + event_signatures: vec![sig(101).to_fixed_bytes().to_vec()], + }]; + + let firehose_filter = 
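The reworked `FromIterator` impl above swaps a `contains_key` check for the `HashMap` entry API. A distilled sketch of the merge it performs, one entry per address keeping the lowest start block and the union of function selectors, using simplified stand-in types for `Address` and `FunctionSelector`:

```rust
use std::collections::{HashMap, HashSet};

type Address = u64;
type Selector = [u8; 4];

fn merge_call_filters(
    iter: impl IntoIterator<Item = (i32, Address, Selector)>,
) -> HashMap<Address, (i32, HashSet<Selector>)> {
    let mut lookup: HashMap<Address, (i32, HashSet<Selector>)> = HashMap::new();
    for (start_block, address, selector) in iter {
        let entry = lookup.entry(address).or_insert((start_block, HashSet::new()));
        // The earliest start block for an address wins.
        entry.0 = entry.0.min(start_block);
        entry.1.insert(selector);
    }
    lookup
}

fn main() {
    let merged = merge_call_filters([(10, 1, [0; 4]), (5, 1, [1, 0, 0, 0])]);
    assert_eq!(merged[&1].0, 5);
    assert_eq!(merged[&1].1.len(), 2);
}
```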
filter.clone().to_firehose_filter(); + assert_eq!(1, firehose_filter.len()); + + let firehose_filter: HashMap<_, _> = HashMap::from_iter::>( + firehose_filter + .into_iter() + .map(|any| (any.type_url.clone(), any)) + .collect_vec(), + ); + + let mut combined_filter = &firehose_filter + .get(COMBINED_FILTER_TYPE_URL) + .expect("a CombinedFilter") + .value[..]; + + let combined_filter = + CombinedFilter::decode(&mut combined_filter).expect("combined filter to decode"); + + let CombinedFilter { + log_filters: mut actual_log_filters, + call_filters: actual_call_filters, + send_all_block_headers: actual_send_all_block_headers, + } = combined_filter; + + assert_eq!(0, actual_call_filters.len()); + actual_log_filters.sort_by(|a, b| a.addresses.cmp(&b.addresses)); for filter in actual_log_filters.iter_mut() { filter.event_signatures.sort(); } assert_eq!(expected_log_filters, actual_log_filters); - filter.block.trigger_every_block = true; - let firehose_filter = filter.to_firehose_filter(); - assert_eq!(firehose_filter.len(), 0); + assert_eq!(true, actual_send_all_block_headers); } #[test] @@ -1466,8 +1519,7 @@ fn complete_log_filter() { assert_eq!( logs_filters .iter() - .map(|l| l.contracts.iter()) - .flatten() + .flat_map(|l| l.contracts.iter()) .copied() .collect::>(), contracts @@ -1475,8 +1527,7 @@ fn complete_log_filter() { assert_eq!( logs_filters .iter() - .map(|l| l.event_signatures.iter()) - .flatten() + .flat_map(|l| l.event_signatures.iter()) .copied() .collect::>(), events diff --git a/chain/ethereum/src/capabilities.rs b/chain/ethereum/src/capabilities.rs index 15ce8501d5b..d1296c4f45c 100644 --- a/chain/ethereum/src/capabilities.rs +++ b/chain/ethereum/src/capabilities.rs @@ -1,11 +1,9 @@ use anyhow::Error; use graph::impl_slog_value; +use std::cmp::Ordering; +use std::collections::BTreeSet; use std::fmt; use std::str::FromStr; -use std::{ - cmp::{Ord, Ordering, PartialOrd}, - collections::BTreeSet, -}; use crate::DataSource; @@ -15,28 +13,28 @@ pub struct NodeCapabilities { pub traces: bool, } -// Take all NodeCapabilities fields into account when ordering -// A NodeCapabilities instance is considered equal or greater than another -// if all of its fields are equal or greater than the other -impl Ord for NodeCapabilities { - fn cmp(&self, other: &Self) -> Ordering { - match ( +/// Two [`NodeCapabilities`] can only be compared if one is the subset of the +/// other. No [`Ord`] (i.e. total order) implementation is applicable. +impl PartialOrd for NodeCapabilities { + fn partial_cmp(&self, other: &Self) -> Option { + product_order([ self.archive.cmp(&other.archive), self.traces.cmp(&other.traces), - ) { - (Ordering::Greater, Ordering::Greater) => Ordering::Greater, - (Ordering::Greater, Ordering::Equal) => Ordering::Greater, - (Ordering::Equal, Ordering::Greater) => Ordering::Greater, - (Ordering::Equal, Ordering::Equal) => Ordering::Equal, - (Ordering::Less, _) => Ordering::Less, - (_, Ordering::Less) => Ordering::Less, - } + ]) } } -impl PartialOrd for NodeCapabilities { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) +/// Defines a [product order](https://en.wikipedia.org/wiki/Product_order) over +/// an array of [`Ordering`]. 
+fn product_order(cmps: [Ordering; N]) -> Option { + if cmps.iter().all(|c| c.is_eq()) { + Some(Ordering::Equal) + } else if cmps.iter().all(|c| c.is_le()) { + Some(Ordering::Less) + } else if cmps.iter().all(|c| c.is_ge()) { + Some(Ordering::Greater) + } else { + None } } @@ -78,7 +76,7 @@ impl graph::blockchain::NodeCapabilities for NodeCapabilities { .requires_archive() .expect("failed to parse mappings") }), - traces: data_sources.into_iter().any(|ds| { + traces: data_sources.iter().any(|ds| { ds.mapping.has_call_handler() || ds.mapping.has_block_handler_with_call_filter() }), } diff --git a/chain/ethereum/src/chain.rs b/chain/ethereum/src/chain.rs index c793cd4e445..1a5d1d973ef 100644 --- a/chain/ethereum/src/chain.rs +++ b/chain/ethereum/src/chain.rs @@ -8,7 +8,7 @@ use graph::slog::debug; use graph::{ blockchain::{ block_stream::{ - BlockStreamEvent, BlockWithTriggers, FirehoseError, + BlockRefetcher, BlockStreamEvent, BlockWithTriggers, FirehoseError, FirehoseMapper as FirehoseMapperTrait, TriggersAdapter as TriggersAdapterTrait, }, firehose_block_stream::FirehoseBlockStream, @@ -64,24 +64,21 @@ impl BlockStreamBuilder for EthereumStreamBuilder { let requirements = filter.node_capabilities(); let adapter = chain .triggers_adapter(&deployment, &requirements, unified_api_version) - .expect(&format!( - "no adapter for network {} with capabilities {}", - chain.name, requirements - )); - - let firehose_endpoint = match chain.firehose_endpoints.random() { - Some(e) => e.clone(), - None => return Err(anyhow::format_err!("no firehose endpoint available",)), - }; + .unwrap_or_else(|_| { + panic!( + "no adapter for network {} with capabilities {}", + chain.name, requirements + ) + }); + + let firehose_endpoint = chain.firehose_endpoints.random()?; let logger = chain .logger_factory .subgraph_logger(&deployment) .new(o!("component" => "FirehoseBlockStream")); - let firehose_mapper = Arc::new(FirehoseMapper { - endpoint: firehose_endpoint.cheap_clone(), - }); + let firehose_mapper = Arc::new(FirehoseMapper {}); Ok(Box::new(FirehoseBlockStream::new( deployment.hash, @@ -110,6 +107,30 @@ impl BlockStreamBuilder for EthereumStreamBuilder { } } +pub struct EthereumBlockRefetcher {} + +#[async_trait] +impl BlockRefetcher for EthereumBlockRefetcher { + fn required(&self, chain: &Chain) -> bool { + chain.is_firehose_supported() + } + + async fn get_block( + &self, + chain: &Chain, + logger: &Logger, + cursor: FirehoseCursor, + ) -> Result { + let endpoint = chain.firehose_endpoints.random().context( + "expecting to always have at least one Firehose endpoint when this method is called", + )?; + + let block = endpoint.get_block::(cursor, logger).await?; + let ethereum_block: EthereumBlockWithCalls = (&block).try_into()?; + Ok(BlockFinality::NonFinal(ethereum_block)) + } +} + pub struct EthereumAdapterSelector { logger_factory: LoggerFactory, adapters: Arc, @@ -145,7 +166,7 @@ impl TriggersAdapterSelector for EthereumAdapterSelector { ) -> Result>, Error> { let logger = self .logger_factory - .subgraph_logger(&loc) + .subgraph_logger(loc) .new(o!("component" => "BlockStream")); let eth_adapter = if capabilities.traces && self.firehose_endpoints.len() > 0 { @@ -155,9 +176,10 @@ impl TriggersAdapterSelector for EthereumAdapterSelector { traces: false, }; - self.adapters.cheapest_with(&adjusted_capabilities)?.clone() + self.adapters + .call_or_cheapest(Some(&adjusted_capabilities))? } else { - self.adapters.cheapest_with(capabilities)?.clone() + self.adapters.cheapest_with(capabilities)? 
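A runnable, self-contained sketch of the product order introduced above: the function is const-generic over the number of compared dimensions and returns `None` for incomparable inputs, which is exactly why `NodeCapabilities` can only carry a `PartialOrd` and not an `Ord`:

```rust
use std::cmp::Ordering;

fn product_order<const N: usize>(cmps: [Ordering; N]) -> Option<Ordering> {
    if cmps.iter().all(|c| c.is_eq()) {
        Some(Ordering::Equal)
    } else if cmps.iter().all(|c| c.is_le()) {
        Some(Ordering::Less)
    } else if cmps.iter().all(|c| c.is_ge()) {
        Some(Ordering::Greater)
    } else {
        // Mixed directions: neither capability set subsumes the other.
        None
    }
}

fn main() {
    // (archive, traces): (true, false) vs. (false, true) are incomparable.
    assert_eq!(product_order([Ordering::Greater, Ordering::Less]), None);
    // (true, true) dominates (true, false).
    assert_eq!(
        product_order([Ordering::Equal, Ordering::Greater]),
        Some(Ordering::Greater)
    );
}
```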
}; let ethrpc_metrics = Arc::new(SubgraphEthRpcMetrics::new(self.registry.clone(), &loc.hash)); @@ -186,6 +208,7 @@ pub struct Chain { reorg_threshold: BlockNumber, pub is_ingestible: bool, block_stream_builder: Arc>, + block_refetcher: Arc>, adapter_selector: Arc>, runtime_adapter: Arc>, } @@ -209,6 +232,7 @@ impl Chain { eth_adapters: EthereumNetworkAdapters, chain_head_update_listener: Arc, block_stream_builder: Arc>, + block_refetcher: Arc>, adapter_selector: Arc>, runtime_adapter: Arc>, reorg_threshold: BlockNumber, @@ -225,6 +249,7 @@ impl Chain { call_cache, chain_head_update_listener, block_stream_builder, + block_refetcher, adapter_selector, runtime_adapter, reorg_threshold, @@ -238,7 +263,7 @@ impl Chain { } pub fn cheapest_adapter(&self) -> Arc { - self.eth_adapters.cheapest().unwrap().clone() + self.eth_adapters.cheapest().unwrap() } } @@ -308,10 +333,12 @@ impl Blockchain for Chain { let requirements = filter.node_capabilities(); let adapter = self .triggers_adapter(&deployment, &requirements, unified_api_version.clone()) - .expect(&format!( - "no adapter for network {} with capabilities {}", - self.name, requirements - )); + .unwrap_or_else(|_| { + panic!( + "no adapter for network {} with capabilities {}", + self.name, requirements + ) + }); let logger = self .logger_factory @@ -369,6 +396,18 @@ impl Blockchain for Chain { .await } + fn is_refetch_block_required(&self) -> bool { + self.block_refetcher.required(self) + } + + async fn refetch_firehose_block( + &self, + logger: &Logger, + cursor: FirehoseCursor, + ) -> Result { + self.block_refetcher.get_block(self, logger, cursor).await + } + fn runtime_adapter(&self) -> Arc> { self.runtime_adapter.clone() } @@ -531,9 +570,9 @@ impl TriggersAdapterTrait for TriggersAdapter { &filter.log, &full_block.ethereum_block, )); - triggers.append(&mut parse_call_triggers(&filter.call, &full_block)?); - triggers.append(&mut parse_block_triggers(&filter.block, &full_block)); - Ok(BlockWithTriggers::new(block, triggers)) + triggers.append(&mut parse_call_triggers(&filter.call, full_block)?); + triggers.append(&mut parse_block_triggers(&filter.block, full_block)); + Ok(BlockWithTriggers::new(block, triggers, logger)) } } } @@ -584,9 +623,7 @@ impl TriggersAdapterTrait for TriggersAdapter { } } -pub struct FirehoseMapper { - endpoint: Arc, -} +pub struct FirehoseMapper {} #[async_trait] impl FirehoseMapperTrait for FirehoseMapper { @@ -647,11 +684,11 @@ impl FirehoseMapperTrait for FirehoseMapper { )) } - StepIrreversible => { + StepFinal => { unreachable!("irreversible step is not handled and should not be requested in the Firehose request") } - StepUnknown => { + StepUnset => { unreachable!("unknown step should not happen in the Firehose response") } } @@ -660,9 +697,10 @@ impl FirehoseMapperTrait for FirehoseMapper { async fn block_ptr_for_number( &self, logger: &Logger, + endpoint: &Arc, number: BlockNumber, ) -> Result { - self.endpoint + endpoint .block_ptr_for_number::(logger, number) .await } @@ -670,6 +708,7 @@ impl FirehoseMapperTrait for FirehoseMapper { async fn final_block_ptr_for( &self, logger: &Logger, + endpoint: &Arc, block: &BlockFinality, ) -> Result { // Firehose for Ethereum has an hard-coded confirmations for finality sets to 200 block @@ -680,8 +719,7 @@ impl FirehoseMapperTrait for FirehoseMapper { _ => 0, }; - self.endpoint - .block_ptr_for_number::(logger, final_block_number) + self.block_ptr_for_number(logger, endpoint, final_block_number) .await } } diff --git a/chain/ethereum/src/codec.rs 
b/chain/ethereum/src/codec.rs index b1ab8008796..211a70a00c8 100644 --- a/chain/ethereum/src/codec.rs +++ b/chain/ethereum/src/codec.rs @@ -4,7 +4,7 @@ mod pbcodec; use anyhow::format_err; use graph::{ - blockchain::{Block as BlockchainBlock, BlockPtr}, + blockchain::{Block as BlockchainBlock, BlockPtr, ChainStoreBlock, ChainStoreData}, prelude::{ web3, web3::types::{Bytes, H160, H2048, H256, H64, U256, U64}, @@ -195,7 +195,19 @@ impl<'a> TryInto for TransactionTraceAt<'a> { .from .try_decode_proto("transaction from address")?, ), - to: Some(self.trace.to.try_decode_proto("transaction to address")?), + to: match self.trace.calls.len() { + 0 => Some(self.trace.to.try_decode_proto("transaction to address")?), + _ => { + match CallType::from_i32(self.trace.calls[0].call_type).ok_or_else(|| { + format_err!("invalid call type: {}", self.trace.calls[0].call_type,) + })? { + CallType::Create => { + None // we don't want the 'to' address on a transaction that creates the contract, to align with RPC behavior + } + _ => Some(self.trace.to.try_decode_proto("transaction to")?), + } + } + }, value: self.trace.value.as_ref().map_or(U256::zero(), |x| x.into()), gas_price: self.trace.gas_price.as_ref().map(|x| x.into()), gas: U256::from(self.trace.gas_limit), @@ -244,27 +256,30 @@ impl TryInto for &Block { receipts_root: header.receipt_root.try_decode_proto("receipt root")?, gas_used: U256::from(header.gas_used), gas_limit: U256::from(header.gas_limit), - base_fee_per_gas: None, + base_fee_per_gas: Some( + header + .base_fee_per_gas + .as_ref() + .map_or_else(U256::default, |v| v.into()), + ), extra_data: Bytes::from(header.extra_data.clone()), logs_bloom: match &header.logs_bloom.len() { 0 => None, _ => Some(header.logs_bloom.try_decode_proto("logs bloom")?), }, - timestamp: U256::from( - header - .timestamp - .as_ref() - .map_or_else(|| U256::default(), |v| U256::from(v.seconds)), - ), + timestamp: header + .timestamp + .as_ref() + .map_or_else(U256::default, |v| U256::from(v.seconds)), difficulty: header .difficulty .as_ref() - .map_or_else(|| U256::default(), |v| v.into()), + .map_or_else(U256::default, |v| v.into()), total_difficulty: Some( header .total_difficulty .as_ref() - .map_or_else(|| U256::default(), |v| v.into()), + .map_or_else(U256::default, |v| v.into()), ), // FIXME (SF): Firehose does not have seal fields, are they really used? Might be required for POA chains only also, I've seen that stuff on xDai (is this important?) seal_fields: vec![], @@ -276,7 +291,7 @@ impl TryInto for &Block { transactions: self .transaction_traces .iter() - .map(|t| TransactionTraceAt::new(t, &self).try_into()) + .map(|t| TransactionTraceAt::new(t, self).try_into()) .collect::, Error>>()?, size: Some(U256::from(self.size)), mix_hash: Some(header.mix_hash.try_decode_proto("mix hash")?), @@ -321,7 +336,7 @@ impl TryInto for &Block { logs: r .logs .iter() - .map(|l| LogAt::new(l, &self, t).try_into()) + .map(|l| LogAt::new(l, self, t).try_into()) .collect::, Error>>()?, status: TransactionTraceStatus::from_i32(t.status) .ok_or_else(|| { @@ -426,6 +441,14 @@ impl BlockchainBlock for Block { fn parent_ptr(&self) -> Option { self.parent_ptr() } + + // This implementation provides the timestamp so that it works with block _meta's timestamp. + // However, the firehose types will not populate the transaction receipts so switching back + // from firehose ingestor to the firehose ingestor will prevent non final block from being + // processed using the block stored by firehose. 
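The new `to`-address handling above mirrors RPC behavior: a trace whose first call is a CREATE has no `to` address. A distilled sketch of that rule, where `CallType` and the raw address array are simplified stand-ins for the generated protobuf types:

```rust
enum CallType {
    Call,
    Create,
}

fn transaction_to(calls: &[CallType], to: [u8; 20]) -> Option<[u8; 20]> {
    match calls.first() {
        // Contract creation: omit `to`, as the RPC interface does.
        Some(CallType::Create) => None,
        // Plain calls (or traces without sub-calls) keep the `to` address.
        _ => Some(to),
    }
}

fn main() {
    assert_eq!(transaction_to(&[CallType::Create], [9; 20]), None);
    assert_eq!(transaction_to(&[CallType::Call], [9; 20]), Some([9; 20]));
    assert_eq!(transaction_to(&[], [9; 20]), Some([9; 20]));
}
```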
+ fn data(&self) -> Result { + self.header().to_json() + } } impl HeaderOnlyBlock { @@ -434,6 +457,25 @@ impl HeaderOnlyBlock { } } +impl Into for &BlockHeader { + fn into(self) -> ChainStoreData { + ChainStoreData { + block: ChainStoreBlock::new( + self.timestamp.as_ref().unwrap().seconds, + jsonrpc_core::Value::Null, + ), + } + } +} + +impl BlockHeader { + fn to_json(&self) -> Result { + let chain_store_data: ChainStoreData = self.into(); + + jsonrpc_core::to_value(chain_store_data) + } +} + impl<'a> From<&'a HeaderOnlyBlock> for BlockPtr { fn from(b: &'a HeaderOnlyBlock) -> BlockPtr { BlockPtr::from(b.header()) @@ -452,4 +494,43 @@ impl BlockchainBlock for HeaderOnlyBlock { fn parent_ptr(&self) -> Option { self.header().parent_ptr() } + + // This implementation provides the timestamp so that it works with block _meta's timestamp. + // However, the firehose types will not populate the transaction receipts so switching back + // from firehose ingestor to the firehose ingestor will prevent non final block from being + // processed using the block stored by firehose. + fn data(&self) -> Result { + self.header().to_json() + } +} + +#[cfg(test)] +mod test { + use graph::{blockchain::Block as _, prelude::chrono::Utc}; + use prost_types::Timestamp; + + use crate::codec::BlockHeader; + + use super::Block; + + #[test] + fn ensure_block_serialization() { + let now = Utc::now().timestamp(); + let mut block = Block::default(); + let mut header = BlockHeader::default(); + header.timestamp = Some(Timestamp { + seconds: now, + nanos: 0, + }); + + block.header = Some(header); + + let str_block = block.data().unwrap().to_string(); + + assert_eq!( + str_block, + // if you're confused when reading this, format needs {{ to escape { + format!(r#"{{"block":{{"data":null,"timestamp":"{}"}}}}"#, now) + ); + } } diff --git a/chain/ethereum/src/data_source.rs b/chain/ethereum/src/data_source.rs index 9bf89494cfc..84e0ecd58c9 100644 --- a/chain/ethereum/src/data_source.rs +++ b/chain/ethereum/src/data_source.rs @@ -2,6 +2,7 @@ use anyhow::{anyhow, Error}; use anyhow::{ensure, Context}; use graph::blockchain::TriggerWithHandler; use graph::components::store::StoredDynamicDataSource; +use graph::data_source::CausalityRegion; use graph::prelude::ethabi::ethereum_types::H160; use graph::prelude::ethabi::StateMutability; use graph::prelude::futures03::future::try_join; @@ -9,7 +10,7 @@ use graph::prelude::futures03::stream::FuturesOrdered; use graph::prelude::{Link, SubgraphManifestValidationError}; use graph::slog::{o, trace}; use std::str::FromStr; -use std::{convert::TryFrom, sync::Arc}; +use std::sync::Arc; use tiny_keccak::{keccak256, Keccak}; use graph::{ @@ -50,6 +51,54 @@ pub struct DataSource { } impl blockchain::DataSource for DataSource { + fn from_template_info(info: DataSourceTemplateInfo) -> Result { + let DataSourceTemplateInfo { + template, + params, + context, + creation_block, + } = info; + let template = template.into_onchain().ok_or(anyhow!( + "Cannot create onchain data source from offchain template" + ))?; + + // Obtain the address from the parameters + let string = params + .get(0) + .with_context(|| { + format!( + "Failed to create data source from template `{}`: address parameter is missing", + template.name + ) + })? 
+ .trim_start_matches("0x"); + + let address = Address::from_str(string).with_context(|| { + format!( + "Failed to create data source from template `{}`, invalid address provided", + template.name + ) + })?; + + let contract_abi = template + .mapping + .find_abi(&template.source.abi) + .with_context(|| format!("template `{}`", template.name))?; + + Ok(DataSource { + kind: template.kind, + network: template.network, + name: template.name, + manifest_idx: template.manifest_idx, + address: Some(address), + start_block: 0, + mapping: template.mapping, + context: Arc::new(context), + creation_block: Some(creation_block), + contract_abi, + }) + } + fn address(&self) -> Option<&[u8]> { self.address.as_ref().map(|x| x.as_bytes()) } @@ -77,7 +126,7 @@ impl blockchain::DataSource for DataSource { } fn network(&self) -> Option<&str> { - self.network.as_ref().map(|s| s.as_str()) + self.network.as_deref() } fn context(&self) -> Arc> { @@ -131,7 +180,8 @@ impl blockchain::DataSource for DataSource { .as_ref() .map(|ctx| serde_json::to_value(&ctx).unwrap()), creation_block: self.creation_block, - is_offchain: false, + done_at: None, + causality_region: CausalityRegion::ONCHAIN, } } @@ -144,13 +194,16 @@ impl blockchain::DataSource for DataSource { param, context, creation_block, - is_offchain, + done_at, + causality_region, } = stored; ensure!( - !is_offchain, - "attempted to convert offchain data source to ethereum data source" + causality_region == CausalityRegion::ONCHAIN, + "stored ethereum data source has causality region {}, expected root", + causality_region ); + ensure!(done_at.is_none(), "onchain data sources are never done"); let context = context.map(serde_json::from_value).transpose()?; @@ -336,7 +389,7 @@ impl DataSource { event .inputs .iter() - .map(|input| format!("{}", event_param_type_signature(&input.kind))) + .map(|input| event_param_type_signature(&input.kind)) .collect::>() .join(",") ) @@ -371,16 +424,16 @@ impl DataSource { Uint(size) => format!("uint{}", size), Bool => "bool".into(), String => "string".into(), - Array(inner) => format!("{}[]", event_param_type_signature(&*inner)), + Array(inner) => format!("{}[]", event_param_type_signature(inner)), FixedBytes(size) => format!("bytes{}", size), FixedArray(inner, size) => { - format!("{}[{}]", event_param_type_signature(&*inner), size) + format!("{}[{}]", event_param_type_signature(inner), size) } Tuple(components) => format!( "({})", components .iter() - .map(|component| event_param_type_signature(&component)) + .map(event_param_type_signature) .collect::>() .join(",") ), @@ -443,7 +496,7 @@ impl DataSource { .collect::>() .join(","); // `address,uint256,bool) - arguments.push_str(")"); + arguments.push(')'); // `operation(address,uint256,bool)` let actual_signature = vec![function.name.clone(), arguments].join("("); target_signature == actual_signature @@ -478,7 +531,7 @@ impl DataSource { block: &Arc, logger: &Logger, ) -> Result>, Error> { - if !self.matches_trigger_address(&trigger) { + if !self.matches_trigger_address(trigger) { return Ok(None); } @@ -576,7 +629,7 @@ impl DataSource { // See also ca0edc58-0ec5-4c89-a7dd-2241797f5e50. let transaction = if log.transaction_hash != block.hash { block - .transaction_for_log(&log) + .transaction_for_log(log) .context("Found no transaction for event")? } else { // Infer some fields from the log and fill the rest with zeros. 
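A self-contained sketch of the parameter handling in `from_template_info` above: take the first template parameter, strip an optional `0x` prefix, and attach context to every failure. `parse_template_address` and its `[u8; 20]` return type are simplifications of the real `Address` handling; `anyhow` and `hex` are both already dependencies of this crate:

```rust
use anyhow::{anyhow, Context, Result};

fn parse_template_address(template_name: &str, params: &[String]) -> Result<[u8; 20]> {
    let raw = params
        .get(0)
        .with_context(|| {
            format!("template `{template_name}`: address parameter is missing")
        })?
        .trim_start_matches("0x");

    let bytes = hex::decode(raw)
        .with_context(|| format!("template `{template_name}`: address is not valid hex"))?;
    bytes
        .try_into()
        .map_err(|_| anyhow!("template `{template_name}`: address is not 20 bytes"))
}

fn main() -> Result<()> {
    let addr =
        parse_template_address("Pool", &["0x0000000000000000000000000000000000000001".into()])?;
    assert_eq!(addr[19], 1);
    Ok(())
}
```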
@@ -610,7 +663,7 @@ impl DataSource { } EthereumTrigger::Call(call) => { // Identify the call handler for this call - let handler = match self.handler_for_call(&call)? { + let handler = match self.handler_for_call(call)? { Some(handler) => handler, None => return Ok(None), }; @@ -697,7 +750,7 @@ impl DataSource { let transaction = Arc::new( block - .transaction_for_call(&call) + .transaction_for_call(call) .context("Found no transaction for call")?, ); let logging_extras = Arc::new(o! { @@ -751,64 +804,12 @@ impl blockchain::UnresolvedDataSource for UnresolvedDataSource { info!(logger, "Resolve data source"; "name" => &name, "source_address" => format_args!("{:?}", source.address), "source_start_block" => source.start_block); - let mapping = mapping.resolve(&*resolver, logger).await?; + let mapping = mapping.resolve(resolver, logger).await?; DataSource::from_manifest(kind, network, name, source, mapping, context, manifest_idx) } } -impl TryFrom> for DataSource { - type Error = anyhow::Error; - - fn try_from(info: DataSourceTemplateInfo) -> Result { - let DataSourceTemplateInfo { - template, - params, - context, - creation_block, - } = info; - let template = template.into_onchain().ok_or(anyhow!( - "Cannot create onchain data source from offchain template" - ))?; - - // Obtain the address from the parameters - let string = params - .get(0) - .with_context(|| { - format!( - "Failed to create data source from template `{}`: address parameter is missing", - template.name - ) - })? - .trim_start_matches("0x"); - - let address = Address::from_str(string).with_context(|| { - format!( - "Failed to create data source from template `{}`, invalid address provided", - template.name - ) - })?; - - let contract_abi = template - .mapping - .find_abi(&template.source.abi) - .with_context(|| format!("template `{}`", template.name))?; - - Ok(DataSource { - kind: template.kind, - network: template.network, - name: template.name, - manifest_idx: template.manifest_idx, - address: Some(address), - start_block: 0, - mapping: template.mapping, - context: Arc::new(context), - creation_block: Some(creation_block), - contract_abi, - }) - } -} - #[derive(Clone, Debug, Default, Hash, Eq, PartialEq, Deserialize)] pub struct UnresolvedDataSourceTemplate { pub kind: String, @@ -1010,7 +1011,7 @@ impl UnresolvedMappingABI { "link" => &self.file.link ); - let contract_bytes = resolver.cat(&logger, &self.file).await?; + let contract_bytes = resolver.cat(logger, &self.file).await?; let contract = Contract::load(&*contract_bytes)?; Ok(MappingABI { name: self.name, @@ -1058,7 +1059,7 @@ impl MappingEventHandler { /// Hashes a string to a H256 hash. 
fn string_to_h256(s: &str) -> H256 { let mut result = [0u8; 32]; - let data = s.replace(" ", "").into_bytes(); + let data = s.replace(' ', "").into_bytes(); let mut sponge = Keccak::new_keccak256(); sponge.update(&data); sponge.finalize(&mut result); diff --git a/chain/ethereum/src/ethereum_adapter.rs b/chain/ethereum/src/ethereum_adapter.rs index e31d2b6662e..a7c0eec5bfb 100644 --- a/chain/ethereum/src/ethereum_adapter.rs +++ b/chain/ethereum/src/ethereum_adapter.rs @@ -5,7 +5,6 @@ use graph::blockchain::BlockHash; use graph::blockchain::ChainIdentifier; use graph::components::transaction_receipt::LightTransactionReceipt; use graph::data::subgraph::UnifiedMappingApiVersion; -use graph::data::subgraph::API_VERSION_0_0_5; use graph::data::subgraph::API_VERSION_0_0_7; use graph::prelude::ethabi::ParamType; use graph::prelude::ethabi::Token; @@ -64,6 +63,7 @@ pub struct EthereumAdapter { web3: Arc>, metrics: Arc, supports_eip_1898: bool, + call_only: bool, } /// Gas limit for `eth_call`. The value of 50_000_000 is a protocol-wide parameter so this @@ -85,11 +85,16 @@ impl CheapClone for EthereumAdapter { web3: self.web3.cheap_clone(), metrics: self.metrics.cheap_clone(), supports_eip_1898: self.supports_eip_1898, + call_only: self.call_only, } } } impl EthereumAdapter { + pub fn is_call_only(&self) -> bool { + self.call_only + } + pub async fn new( logger: Logger, provider: String, @@ -97,6 +102,7 @@ impl EthereumAdapter { transport: Transport, provider_metrics: Arc, supports_eip_1898: bool, + call_only: bool, ) -> Self { // Unwrap: The transport was constructed with this url, so it is valid and has a host. let hostname = graph::url::Url::parse(url) @@ -123,6 +129,7 @@ impl EthereumAdapter { web3, metrics: provider_metrics, supports_eip_1898: supports_eip_1898 && !is_ganache, + call_only, } } @@ -134,6 +141,8 @@ impl EthereumAdapter { to: BlockNumber, addresses: Vec, ) -> Result, Error> { + assert!(!self.call_only); + let eth = self.clone(); let retry_log_message = format!("trace_filter RPC call for block range: [{}..{}]", from, to); @@ -168,7 +177,7 @@ impl EthereumAdapter { .filter(trace_filter) .await .map(move |traces| { - if traces.len() > 0 { + if !traces.is_empty() { if to == from { debug!( logger_for_triggers, @@ -226,6 +235,8 @@ impl EthereumAdapter { filter: Arc, too_many_logs_fingerprints: &'static [&'static str], ) -> Result, TimeoutError> { + assert!(!self.call_only); + let eth_adapter = self.clone(); let retry_log_message = format!("eth_getLogs RPC call for block range: [{}..{}]", from, to); retry(retry_log_message, &logger) @@ -291,7 +302,7 @@ impl EthereumAdapter { true => 1, }; - let eth = self.clone(); + let eth = self; let logger = logger.to_owned(); stream::unfold(from, move |start| { if start > to { @@ -501,7 +512,7 @@ impl EthereumAdapter { let env_geth_call_errors = ENV_VARS.geth_eth_call_errors.iter(); let mut geth_execution_errors = GETH_EXECUTION_ERRORS .iter() - .map(|s| *s) + .copied() .chain(env_geth_call_errors.map(|s| s.as_str())); let as_solidity_revert_with_reason = |bytes: &[u8]| { @@ -591,7 +602,7 @@ impl EthereumAdapter { .compat() .from_err::() .and_then(move |block| { - block.map(|block| Arc::new(block)).ok_or_else(|| { + block.map(Arc::new).ok_or_else(|| { anyhow::anyhow!("Ethereum node did not find block {:?}", hash) }) }) @@ -657,7 +668,7 @@ impl EthereumAdapter { block_ptr: BlockPtr, ) -> Result { let block_hash = self - .block_hash_by_block_number(&logger, block_ptr.number) + .block_hash_by_block_number(logger, block_ptr.number) .compat() .await?; 
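A short usage sketch of the `string_to_h256` helper above: spaces are stripped before hashing, so differently spelled but equivalent signatures produce the same keccak-256 topic. This assumes the tiny-keccak 1.x API that this crate depends on:

```rust
use tiny_keccak::Keccak;

fn string_to_hash(s: &str) -> [u8; 32] {
    let mut result = [0u8; 32];
    // Normalize away whitespace so `Transfer(address, uint256)` and
    // `Transfer(address,uint256)` hash identically.
    let data = s.replace(' ', "").into_bytes();
    let mut sponge = Keccak::new_keccak256();
    sponge.update(&data);
    sponge.finalize(&mut result);
    result
}

fn main() {
    assert_eq!(
        string_to_hash("Transfer(address, uint256)"),
        string_to_hash("Transfer(address,uint256)")
    );
}
```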
block_hash @@ -701,8 +712,12 @@ impl EthereumAdapter { ) -> Box + Send + 'a> { let eth = self.clone(); - let mut addresses: Vec = call_filter - .contract_addresses_function_signatures + let EthereumCallFilter { + contract_addresses_function_signatures, + wildcard_signatures, + } = call_filter; + + let mut addresses: Vec = contract_addresses_function_signatures .iter() .filter(|(_addr, (start_block, _fsigs))| start_block <= &to) .map(|(addr, (_start_block, _fsigs))| *addr) @@ -710,27 +725,28 @@ impl EthereumAdapter { .into_iter() .collect::>(); - if addresses.is_empty() { + if addresses.is_empty() && wildcard_signatures.is_empty() { // The filter has no started data sources in the requested range, nothing to do. // This prevents an expensive call to `trace_filter` with empty `addresses`. return Box::new(stream::empty()); } - if addresses.len() > 100 { + // if wildcard_signatures is on, we can't filter by topic so we need to get all the traces. + if addresses.len() > 100 || !wildcard_signatures.is_empty() { // If the address list is large, request all traces, this avoids generating huge // requests and potentially getting 413 errors. addresses = vec![]; } Box::new( - eth.trace_stream(&logger, subgraph_metrics, from, to, addresses) + eth.trace_stream(logger, subgraph_metrics, from, to, addresses) .filter_map(|trace| EthereumCall::try_from_trace(&trace)) .filter(move |call| { // `trace_filter` can only filter by calls `to` an address and // a block range. Since subgraphs are subscribing to calls // for a specific contract function an additional filter needs // to be applied - call_filter.matches(&call) + call_filter.matches(call) }), ) } @@ -746,7 +762,7 @@ impl EthereumAdapter { let addresses = Vec::new(); let traces = eth .trace_stream( - &logger, + logger, subgraph_metrics.clone(), block_number, block_number, @@ -880,8 +896,7 @@ impl EthereumAdapterTrait for EthereumAdapter { metrics.set_status(ProviderStatus::GenesisFail, &provider); e })? 
- .map(|gen_block| gen_block.hash.map(BlockHash::from)) - .flatten() + .and_then(|gen_block| gen_block.hash.map(BlockHash::from)) .ok_or_else(|| anyhow!("Ethereum node could not find genesis block")) } }) @@ -982,7 +997,7 @@ impl EthereumAdapterTrait for EthereumAdapter { block_hash: H256, ) -> Box + Send> { Box::new( - self.block_by_hash(&logger, block_hash) + self.block_by_hash(logger, block_hash) .and_then(move |block_opt| { block_opt.ok_or_else(move || { anyhow!( @@ -1144,7 +1159,7 @@ impl EthereumAdapterTrait for EthereumAdapter { block_number ); Box::new( - retry(retry_log_message, &logger) + retry(retry_log_message, logger) .no_limit() .timeout_secs(ENV_VARS.json_rpc_timeout.as_secs()) .run(move || { @@ -1153,7 +1168,7 @@ impl EthereumAdapterTrait for EthereumAdapter { web3.eth() .block(BlockId::Number(block_number.into())) .await - .map(|block_opt| block_opt.map(|block| block.hash).flatten()) + .map(|block_opt| block_opt.and_then(|block| block.hash)) .map_err(Error::from) } }) @@ -1433,6 +1448,8 @@ pub(crate) async fn blocks_with_triggers( block_hashes.insert(to_hash); triggers_by_block.entry(to).or_insert(Vec::new()); + let logger2 = logger.cheap_clone(); + let blocks = adapter .load_blocks(logger.cheap_clone(), chain_store.clone(), block_hashes) .and_then( @@ -1440,6 +1457,7 @@ pub(crate) async fn blocks_with_triggers( Some(triggers) => Ok(BlockWithTriggers::new( BlockFinality::Final(block), triggers, + &logger2, )), None => Err(anyhow!( "block {} not found in `triggers_by_block`", @@ -1452,14 +1470,10 @@ pub(crate) async fn blocks_with_triggers( .await?; // Filter out call triggers that come from unsuccessful transactions - let mut blocks = if unified_api_version.equal_or_greater_than(&API_VERSION_0_0_5) { - let futures = blocks.into_iter().map(|block| { - filter_call_triggers_from_unsuccessful_transactions(block, ð, &chain_store, &logger) - }); - futures03::future::try_join_all(futures).await? - } else { - blocks - }; + let futures = blocks.into_iter().map(|block| { + filter_call_triggers_from_unsuccessful_transactions(block, ð, &chain_store, &logger) + }); + let mut blocks = futures03::future::try_join_all(futures).await?; blocks.sort_by_key(|block| block.ptr().number); @@ -1695,7 +1709,7 @@ async fn filter_call_triggers_from_unsuccessful_transactions( let futures = transactions_without_receipt .iter() .map(|transaction| async move { - fetch_receipt_from_ethereum_client(ð, &transaction.hash) + fetch_receipt_from_ethereum_client(eth, &transaction.hash) .await .map(|receipt| (transaction, receipt)) }); @@ -1794,7 +1808,7 @@ async fn fetch_transaction_receipts_in_batch( .map(move |hash| { let logger = logger.cheap_clone(); eth.transaction_receipt(hash) - .map_err(|web3_error| IngestorError::from(web3_error)) + .map_err(IngestorError::from) .and_then(move |some_receipt| async move { resolve_transaction_receipt(some_receipt, hash, block_hash, logger) }) @@ -1847,7 +1861,12 @@ fn resolve_transaction_receipt( // Check if the receipt has a block hash and is for the right block. Parity nodes seem // to return receipts with no block hash when a transaction is no longer in the main // chain, so treat that case the same as a receipt being absent entirely. - if receipt.block_hash != Some(block_hash) { + // + // Also as a sanity check against provider nonsense, check that the receipt transaction + // hash and the requested transaction hash match. 
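A distilled version of the receipt validation described above: a mismatched block hash and a mismatched transaction hash are treated the same way, as a receipt that cannot be used for this block. `H256` and `Receipt` are stand-ins for the web3 types:

```rust
type H256 = [u8; 32];

#[derive(Debug)]
enum IngestorError {
    BlockUnavailable(H256),
}

struct Receipt {
    block_hash: Option<H256>,
    transaction_hash: H256,
}

fn resolve_receipt(
    receipt: Receipt,
    transaction_hash: H256,
    block_hash: H256,
) -> Result<Receipt, IngestorError> {
    // Reorged-out transactions show up as a missing or different block hash;
    // a wrong transaction hash means the provider answered for something else.
    if receipt.block_hash != Some(block_hash) || transaction_hash != receipt.transaction_hash {
        return Err(IngestorError::BlockUnavailable(block_hash));
    }
    Ok(receipt)
}

fn main() {
    let receipt = Receipt {
        block_hash: Some([1; 32]),
        transaction_hash: [2; 32],
    };
    assert!(resolve_receipt(receipt, [2; 32], [1; 32]).is_ok());
}
```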
+ if receipt.block_hash != Some(block_hash) + || transaction_hash != receipt.transaction_hash + { info!( logger, "receipt block mismatch"; "receipt_block_hash" => @@ -1855,13 +1874,14 @@ fn resolve_transaction_receipt( "block_hash" => block_hash.to_string(), "tx_hash" => transaction_hash.to_string(), + "receipt_tx_hash" => receipt.transaction_hash.to_string(), ); // If the receipt came from a different block, then the Ethereum node no longer // considers this block to be in the main chain. Nothing we can do from here except // give up trying to ingest this block. There is no way to get the transaction // receipt from this block. - Err(IngestorError::BlockUnavailable(block_hash.clone())) + Err(IngestorError::BlockUnavailable(block_hash)) } else { Ok(receipt) } @@ -2019,7 +2039,7 @@ async fn get_transaction_receipts_for_transaction_hashes( if !unique_transaction_hashes.remove(&receipt.transaction_hash) { bail!("Received a receipt for a different transaction hash") } - if let Vacant(entry) = receipts_by_hash.entry(receipt.transaction_hash.clone()) { + if let Vacant(entry) = receipts_by_hash.entry(receipt.transaction_hash) { entry.insert(receipt); } else { bail!("Received a duplicate transaction receipt") diff --git a/chain/ethereum/src/ingestor.rs b/chain/ethereum/src/ingestor.rs index ac4f906f174..7d902fe5d5f 100644 --- a/chain/ethereum/src/ingestor.rs +++ b/chain/ethereum/src/ingestor.rs @@ -180,7 +180,7 @@ impl BlockIngestor { .block_by_hash(&self.logger, block_hash) .compat() .await? - .ok_or_else(|| IngestorError::BlockUnavailable(block_hash))?; + .ok_or(IngestorError::BlockUnavailable(block_hash))?; let ethereum_block = self .eth_adapter .load_full_block(&self.logger, block) diff --git a/chain/ethereum/src/network.rs b/chain/ethereum/src/network.rs index 0981f59e908..c36a6ce7ba1 100644 --- a/chain/ethereum/src/network.rs +++ b/chain/ethereum/src/network.rs @@ -1,6 +1,7 @@ -use anyhow::{anyhow, Context}; +use anyhow::{anyhow, bail, Context}; use graph::cheap_clone::CheapClone; use graph::prelude::rand::{self, seq::IteratorRandom}; +use std::cmp::Ordering; use std::collections::HashMap; use std::sync::Arc; @@ -22,12 +23,26 @@ pub struct EthereumNetworkAdapter { limit: usize, } -#[derive(Clone)] +impl EthereumNetworkAdapter { + fn is_call_only(&self) -> bool { + self.adapter.is_call_only() + } +} + +#[derive(Clone, Default)] pub struct EthereumNetworkAdapters { pub adapters: Vec, + pub call_only_adapters: Vec, } impl EthereumNetworkAdapters { + pub fn push_adapter(&mut self, adapter: EthereumNetworkAdapter) { + if adapter.is_call_only() { + self.call_only_adapters.push(adapter); + } else { + self.adapters.push(adapter); + } + } pub fn all_cheapest_with( &self, required_capabilities: &NodeCapabilities, @@ -64,8 +79,7 @@ impl EthereumNetworkAdapters { // EthereumAdapters are sorted by their NodeCapabilities when the EthereumNetworks // struct is instantiated so they do not need to be sorted here self.adapters - .iter() - .next() + .first() .map(|ethereum_network_adapter| ethereum_network_adapter.adapter.clone()) } @@ -73,6 +87,42 @@ impl EthereumNetworkAdapters { self.adapters .retain(|adapter| adapter.adapter.provider() != provider); } + + pub fn call_or_cheapest( + &self, + capabilities: Option<&NodeCapabilities>, + ) -> anyhow::Result> { + match self.call_only_adapter()? 
{ + Some(adapter) => Ok(adapter), + None => self.cheapest_with(capabilities.unwrap_or(&NodeCapabilities { + // Archive is required for call_only + archive: true, + traces: false, + })), + } + } + + pub fn call_only_adapter(&self) -> anyhow::Result>> { + if self.call_only_adapters.is_empty() { + return Ok(None); + } + + let adapters = self + .call_only_adapters + .iter() + .min_by_key(|x| Arc::strong_count(&x.adapter)) + .ok_or(anyhow!("no available call only endpoints"))?; + + // TODO: This will probably blow up a lot sooner than [limit] amount of + // subgraphs, since we probably use a few instances. + if Arc::strong_count(&adapters.adapter) >= adapters.limit { + bail!("call only adapter has reached the concurrency limit"); + } + + // Cloning here ensure we have the correct count at any given time, if we return a reference it can be cloned later + // which could cause a high number of endpoints to be given away before accounting for them. + Ok(Some(adapters.adapter.clone())) + } } #[derive(Clone)] @@ -97,10 +147,11 @@ impl EthereumNetworks { let network_adapters = self .networks .entry(name) - .or_insert(EthereumNetworkAdapters { adapters: vec![] }); - network_adapters.adapters.push(EthereumNetworkAdapter { + .or_insert(EthereumNetworkAdapters::default()); + + network_adapters.push_adapter(EthereumNetworkAdapter { capabilities, - adapter: adapter.clone(), + adapter, limit, }); } @@ -135,9 +186,14 @@ impl EthereumNetworks { pub fn sort(&mut self) { for adapters in self.networks.values_mut() { - adapters - .adapters - .sort_by_key(|adapter| adapter.capabilities) + adapters.adapters.sort_by(|a, b| { + a.capabilities + .partial_cmp(&b.capabilities) + // We can't define a total ordering over node capabilities, + // so incomparable items are considered equal and end up + // near each other. 
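The call-only adapter pool above caps concurrent use by counting `Arc` references instead of keeping an explicit counter. A minimal sketch of that technique, assuming `Arc::strong_count` is an acceptable proxy for the number of handles currently checked out (`Adapter` is a stand-in type):

```rust
use std::sync::Arc;

struct Adapter;

fn checkout(pool: &Arc<Adapter>, limit: usize) -> anyhow::Result<Arc<Adapter>> {
    // One reference belongs to the pool itself; the rest are checked-out clones.
    if Arc::strong_count(pool) >= limit {
        anyhow::bail!("call only adapter has reached the concurrency limit");
    }
    // Returning a clone (not a reference) is what keeps the count honest for
    // the next caller, mirroring the comment in the code above.
    Ok(pool.clone())
}

fn main() -> anyhow::Result<()> {
    let pool = Arc::new(Adapter);
    let _a = checkout(&pool, 3)?; // strong count is now 2
    let _b = checkout(&pool, 3)?; // strong count is now 3
    assert!(checkout(&pool, 3).is_err()); // at the limit: refused
    Ok(())
}
```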
+ .unwrap_or(Ordering::Equal) + }) } } @@ -155,6 +211,14 @@ impl EthereumNetworks { #[cfg(test)] mod tests { + use std::sync::Arc; + + use graph::{prelude::MetricsRegistry, tokio, url::Url}; + use graph_mock::MockMetricsRegistry; + use http::HeaderMap; + + use crate::{EthereumAdapter, EthereumNetworks, ProviderEthRpcMetrics, Transport}; + use super::NodeCapabilities; #[test] @@ -211,4 +275,104 @@ mod tests { assert_eq!(true, &full_traces >= &full); assert_eq!(true, &full_traces >= &full_traces); } + + #[tokio::test] + async fn adapter_selector_selects_eth_call() { + let chain = "mainnet".to_string(); + let logger = graph::log::logger(true); + let mock_registry: Arc = Arc::new(MockMetricsRegistry::new()); + let transport = + Transport::new_rpc(Url::parse("http://127.0.0.1").unwrap(), HeaderMap::new()); + let provider_metrics = Arc::new(ProviderEthRpcMetrics::new(mock_registry.clone())); + + let eth_call_adapter = Arc::new( + EthereumAdapter::new( + logger.clone(), + String::new(), + "http://127.0.0.1", + transport.clone(), + provider_metrics.clone(), + true, + true, + ) + .await, + ); + + let eth_adapter = Arc::new( + EthereumAdapter::new( + logger.clone(), + String::new(), + "http://127.0.0.1", + transport.clone(), + provider_metrics.clone(), + true, + false, + ) + .await, + ); + + let mut adapters = { + let mut ethereum_networks = EthereumNetworks::new(); + ethereum_networks.insert( + chain.clone(), + NodeCapabilities { + archive: true, + traces: false, + }, + eth_call_adapter.clone(), + 3, + ); + ethereum_networks.insert( + chain.clone(), + NodeCapabilities { + archive: true, + traces: false, + }, + eth_adapter.clone(), + 3, + ); + ethereum_networks.networks.get(&chain).unwrap().clone() + }; + // one reference above and one inside adapters struct + assert_eq!(Arc::strong_count(ð_call_adapter), 2); + assert_eq!(Arc::strong_count(ð_adapter), 2); + + { + // Not Found + assert!(adapters + .cheapest_with(&NodeCapabilities { + archive: false, + traces: true, + }) + .is_err()); + + // Check cheapest is not call only + let adapter = adapters + .cheapest_with(&NodeCapabilities { + archive: true, + traces: false, + }) + .unwrap(); + assert_eq!(adapter.is_call_only(), false); + } + + // Check limits + { + let adapter = adapters.call_or_cheapest(None).unwrap(); + assert!(adapter.is_call_only()); + assert!(adapters.call_or_cheapest(None).is_err()); + } + + // Check empty falls back to call only + { + adapters.call_only_adapters = vec![]; + let adapter = adapters + .call_or_cheapest(Some(&NodeCapabilities { + archive: true, + traces: false, + })) + .unwrap(); + assert_eq!(adapter.is_call_only(), false); + } + } } diff --git a/chain/ethereum/src/protobuf/sf.ethereum.r#type.v2.rs b/chain/ethereum/src/protobuf/sf.ethereum.r#type.v2.rs index a4daf1e2498..1e6b7841c8d 100644 --- a/chain/ethereum/src/protobuf/sf.ethereum.r#type.v2.rs +++ b/chain/ethereum/src/protobuf/sf.ethereum.r#type.v2.rs @@ -1,27 +1,28 @@ +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Block { - #[prost(int32, tag="1")] + #[prost(int32, tag = "1")] pub ver: i32, - #[prost(bytes="vec", tag="2")] + #[prost(bytes = "vec", tag = "2")] pub hash: ::prost::alloc::vec::Vec, - #[prost(uint64, tag="3")] + #[prost(uint64, tag = "3")] pub number: u64, - #[prost(uint64, tag="4")] + #[prost(uint64, tag = "4")] pub size: u64, - #[prost(message, optional, tag="5")] + #[prost(message, optional, tag = "5")] pub header: ::core::option::Option, /// Uncles represents block produced with a valid 
solution but were not actually choosen /// as the canonical block for the given height so they are mostly "forked" blocks. /// /// If the Block has been produced using the Proof of Stake consensus algorithm, this /// field will actually be always empty. - #[prost(message, repeated, tag="6")] + #[prost(message, repeated, tag = "6")] pub uncles: ::prost::alloc::vec::Vec, - #[prost(message, repeated, tag="10")] + #[prost(message, repeated, tag = "10")] pub transaction_traces: ::prost::alloc::vec::Vec, - #[prost(message, repeated, tag="11")] + #[prost(message, repeated, tag = "11")] pub balance_changes: ::prost::alloc::vec::Vec, - #[prost(message, repeated, tag="20")] + #[prost(message, repeated, tag = "20")] pub code_changes: ::prost::alloc::vec::Vec, } /// HeaderOnlyBlock is used to optimally unpack the \[Block\] structure (note the @@ -30,68 +31,74 @@ pub struct Block { /// /// WARN: this is a client-side optimization pattern and should be moved in the /// consuming code. +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct HeaderOnlyBlock { - #[prost(message, optional, tag="5")] + #[prost(message, optional, tag = "5")] pub header: ::core::option::Option, } /// BlockWithRefs is a lightweight block, with traces and transactions /// purged from the `block` within, and only. It is used in transports /// to pass block data around. +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct BlockWithRefs { - #[prost(string, tag="1")] + #[prost(string, tag = "1")] pub id: ::prost::alloc::string::String, - #[prost(message, optional, tag="2")] + #[prost(message, optional, tag = "2")] pub block: ::core::option::Option, - #[prost(message, optional, tag="3")] + #[prost(message, optional, tag = "3")] pub transaction_trace_refs: ::core::option::Option, - #[prost(bool, tag="4")] + #[prost(bool, tag = "4")] pub irreversible: bool, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct TransactionRefs { - #[prost(bytes="vec", repeated, tag="1")] + #[prost(bytes = "vec", repeated, tag = "1")] pub hashes: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec>, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct UnclesHeaders { - #[prost(message, repeated, tag="1")] + #[prost(message, repeated, tag = "1")] pub uncles: ::prost::alloc::vec::Vec, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct BlockRef { - #[prost(bytes="vec", tag="1")] + #[prost(bytes = "vec", tag = "1")] pub hash: ::prost::alloc::vec::Vec, - #[prost(uint64, tag="2")] + #[prost(uint64, tag = "2")] pub number: u64, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct BlockHeader { - #[prost(bytes="vec", tag="1")] + #[prost(bytes = "vec", tag = "1")] pub parent_hash: ::prost::alloc::vec::Vec, /// Uncle hash of the block, some reference it as `sha3Uncles`, but `sha3`` is badly worded, so we prefer `uncle_hash`, also /// referred as `ommers` in EIP specification. /// /// If the Block containing this `BlockHeader` has been produced using the Proof of Stake /// consensus algorithm, this field will actually be constant and set to `0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347`. 
- #[prost(bytes="vec", tag="2")] + #[prost(bytes = "vec", tag = "2")] pub uncle_hash: ::prost::alloc::vec::Vec, - #[prost(bytes="vec", tag="3")] + #[prost(bytes = "vec", tag = "3")] pub coinbase: ::prost::alloc::vec::Vec, - #[prost(bytes="vec", tag="4")] + #[prost(bytes = "vec", tag = "4")] pub state_root: ::prost::alloc::vec::Vec, - #[prost(bytes="vec", tag="5")] + #[prost(bytes = "vec", tag = "5")] pub transactions_root: ::prost::alloc::vec::Vec, - #[prost(bytes="vec", tag="6")] + #[prost(bytes = "vec", tag = "6")] pub receipt_root: ::prost::alloc::vec::Vec, - #[prost(bytes="vec", tag="7")] + #[prost(bytes = "vec", tag = "7")] pub logs_bloom: ::prost::alloc::vec::Vec, /// Difficulty is the difficulty of the Proof of Work algorithm that was required to compute a solution. /// /// If the Block containing this `BlockHeader` has been produced using the Proof of Stake /// consensus algorithm, this field will actually be constant and set to `0x00`. - #[prost(message, optional, tag="8")] + #[prost(message, optional, tag = "8")] pub difficulty: ::core::option::Option, /// TotalDifficulty is the sum of all previous blocks difficulty including this block difficulty. /// @@ -99,15 +106,15 @@ pub struct BlockHeader { /// consensus algorithm, this field will actually be constant and set to the terminal total difficulty /// that was required to transition to Proof of Stake algorithm, which varies per network. It is set to /// 58 750 000 000 000 000 000 000 on Ethereum Mainnet and to 10 790 000 on Ethereum Testnet Goerli. - #[prost(message, optional, tag="17")] + #[prost(message, optional, tag = "17")] pub total_difficulty: ::core::option::Option, - #[prost(uint64, tag="9")] + #[prost(uint64, tag = "9")] pub number: u64, - #[prost(uint64, tag="10")] + #[prost(uint64, tag = "10")] pub gas_limit: u64, - #[prost(uint64, tag="11")] + #[prost(uint64, tag = "11")] pub gas_used: u64, - #[prost(message, optional, tag="12")] + #[prost(message, optional, tag = "12")] pub timestamp: ::core::option::Option<::prost_types::Timestamp>, /// ExtraData is free-form bytes included in the block by the "miner". While on Yellow paper of /// Ethereum this value is maxed to 32 bytes, other consensus algorithm like Clique and some other @@ -115,57 +122,59 @@ pub struct BlockHeader { /// /// If the Block containing this `BlockHeader` has been produced using the Proof of Stake /// consensus algorithm, this field is strictly enforced to be <= 32 bytes. - #[prost(bytes="vec", tag="13")] + #[prost(bytes = "vec", tag = "13")] pub extra_data: ::prost::alloc::vec::Vec, /// MixHash is used to prove, when combined with the `nonce` that sufficient amount of computation has been /// achieved and that the solution found is valid. - #[prost(bytes="vec", tag="14")] + #[prost(bytes = "vec", tag = "14")] pub mix_hash: ::prost::alloc::vec::Vec, /// Nonce is used to prove, when combined with the `mix_hash` that sufficient amount of computation has been /// achieved and that the solution found is valid. /// /// If the Block containing this `BlockHeader` has been produced using the Proof of Stake /// consensus algorithm, this field will actually be constant and set to `0`. 
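Editor's aside: the header documentation above fixes several sentinel values for blocks produced under Proof of Stake (a constant `uncle_hash`, zero `difficulty`, zero `nonce`). A hedged heuristic that checks two of them together, assuming it lives alongside the generated types so `BlockHeader` and `BigInt` are in scope; all names in this sketch are illustrative and it is not part of the generated file:

```rust
// Hedged heuristic: detect a Proof-of-Stake block from the sentinel values
// documented above. EMPTY_UNCLE_HASH is Keccak256(RLP([])), the constant
// quoted in the `uncle_hash` comment.
const EMPTY_UNCLE_HASH: [u8; 32] = [
    0x1d, 0xcc, 0x4d, 0xe8, 0xde, 0xc7, 0x5d, 0x7a, 0xab, 0x85, 0xb5, 0x67,
    0xb6, 0xcc, 0xd4, 0x1a, 0xd3, 0x12, 0x45, 0x1b, 0x94, 0x8a, 0x74, 0x13,
    0xf0, 0xa1, 0x42, 0xfd, 0x40, 0xd4, 0x93, 0x47,
];

fn looks_post_merge(header: &BlockHeader) -> bool {
    // Under Proof of Stake, `difficulty` is documented to be constant 0x00.
    let zero_difficulty = header
        .difficulty
        .as_ref()
        .map_or(false, |d| d.bytes.iter().all(|&b| b == 0));
    zero_difficulty && header.uncle_hash == EMPTY_UNCLE_HASH
}
```

Checking both sentinels rather than either one alone keeps the heuristic from misfiring on unusual but valid pre-merge headers.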
- #[prost(uint64, tag="15")] + #[prost(uint64, tag = "15")] pub nonce: u64, /// Hash is the hash of the block which is actually the computation: /// - /// Keccak256(rlp([ - /// parent_hash, - /// uncle_hash, - /// coinbase, - /// state_root, - /// transactions_root, - /// receipt_root, - /// logs_bloom, - /// difficulty, - /// number, - /// gas_limit, - /// gas_used, - /// timestamp, - /// extra_data, - /// mix_hash, - /// nonce, - /// base_fee_per_gas - /// ])) + /// Keccak256(rlp([ + /// parent_hash, + /// uncle_hash, + /// coinbase, + /// state_root, + /// transactions_root, + /// receipt_root, + /// logs_bloom, + /// difficulty, + /// number, + /// gas_limit, + /// gas_used, + /// timestamp, + /// extra_data, + /// mix_hash, + /// nonce, + /// base_fee_per_gas + /// ])) /// - #[prost(bytes="vec", tag="16")] + #[prost(bytes = "vec", tag = "16")] pub hash: ::prost::alloc::vec::Vec, /// Base fee per gas according to EIP-1559 (e.g. London Fork) rules, only set if London is present/active on the chain. - #[prost(message, optional, tag="18")] + #[prost(message, optional, tag = "18")] pub base_fee_per_gas: ::core::option::Option, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct BigInt { - #[prost(bytes="vec", tag="1")] + #[prost(bytes = "vec", tag = "1")] pub bytes: ::prost::alloc::vec::Vec, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct TransactionTrace { /// consensus - #[prost(bytes="vec", tag="1")] + #[prost(bytes = "vec", tag = "1")] pub to: ::prost::alloc::vec::Vec, - #[prost(uint64, tag="2")] + #[prost(uint64, tag = "2")] pub nonce: u64, /// GasPrice represents the effective price that has been paid for each gas unit of this transaction. Over time, the /// Ethereum rules changes regarding GasPrice field here. Before London fork, the GasPrice was always set to the @@ -176,79 +185,89 @@ pub struct TransactionTrace { /// /// In cases where `TransactionTrace.Type == TRX_TYPE_DYNAMIC_FEE`, then GasPrice is the effective gas price paid /// for the transaction which is equals to `BlockHeader.BaseFeePerGas + TransactionTrace.` - #[prost(message, optional, tag="3")] + #[prost(message, optional, tag = "3")] pub gas_price: ::core::option::Option, /// GasLimit is the maximum of gas unit the sender of the transaction is willing to consume when perform the EVM /// execution of the whole transaction - #[prost(uint64, tag="4")] + #[prost(uint64, tag = "4")] pub gas_limit: u64, /// Value is the amount of Ether transferred as part of this transaction. - #[prost(message, optional, tag="5")] + #[prost(message, optional, tag = "5")] pub value: ::core::option::Option, /// Input data the transaction will receive for execution of EVM. - #[prost(bytes="vec", tag="6")] + #[prost(bytes = "vec", tag = "6")] pub input: ::prost::alloc::vec::Vec, /// V is the recovery ID value for the signature Y point. - #[prost(bytes="vec", tag="7")] + #[prost(bytes = "vec", tag = "7")] pub v: ::prost::alloc::vec::Vec, /// R is the signature's X point on the elliptic curve (32 bytes). - #[prost(bytes="vec", tag="8")] + #[prost(bytes = "vec", tag = "8")] pub r: ::prost::alloc::vec::Vec, /// S is the signature's Y point on the elliptic curve (32 bytes). - #[prost(bytes="vec", tag="9")] + #[prost(bytes = "vec", tag = "9")] pub s: ::prost::alloc::vec::Vec, /// GasUsed is the total amount of gas unit used for the whole execution of the transaction. 
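Editor's aside: the `gas_price` comment above leaves the dynamic-fee formula truncated. Under the standard EIP-1559 rule, the effective price of a `TRX_TYPE_DYNAMIC_FEE` transaction is `min(max_fee_per_gas, base_fee_per_gas + max_priority_fee_per_gas)`. A minimal sketch, assuming `BigInt.bytes` encodes an unsigned big-endian integer (an assumption to verify against the Firehose encoder); the helper names are hypothetical:

```rust
// Hedged sketch, not part of the generated file. `bigint_to_u128` is a
// hypothetical helper; it silently truncates values wider than 128 bits.
fn bigint_to_u128(b: &BigInt) -> u128 {
    // Assumption: unsigned, big-endian byte order.
    b.bytes.iter().fold(0u128, |acc, &byte| (acc << 8) | u128::from(byte))
}

// Standard EIP-1559 effective price for a dynamic-fee transaction.
fn effective_gas_price(base_fee: u128, max_fee: u128, max_priority_fee: u128) -> u128 {
    max_fee.min(base_fee + max_priority_fee)
}
```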
- #[prost(uint64, tag="10")] + #[prost(uint64, tag = "10")] pub gas_used: u64, /// Type represents the Ethereum transaction type, available only since EIP-2718 & EIP-2930 activation which happened on Berlin fork. /// The value is always set even for transaction before Berlin fork because those before the fork are still legacy transactions. - #[prost(enumeration="transaction_trace::Type", tag="12")] + #[prost(enumeration = "transaction_trace::Type", tag = "12")] pub r#type: i32, /// AcccessList represents the storage access this transaction has agreed to do in which case those storage /// access cost less gas unit per access. /// /// This will is populated only if `TransactionTrace.Type == TRX_TYPE_ACCESS_LIST || TRX_TYPE_DYNAMIC_FEE` which /// is possible only if Berlin (TRX_TYPE_ACCESS_LIST) nor London (TRX_TYPE_DYNAMIC_FEE) fork are active on the chain. - #[prost(message, repeated, tag="14")] + #[prost(message, repeated, tag = "14")] pub access_list: ::prost::alloc::vec::Vec, /// MaxFeePerGas is the maximum fee per gas the user is willing to pay for the transaction gas used. /// /// This will is populated only if `TransactionTrace.Type == TRX_TYPE_DYNAMIC_FEE` which is possible only /// if London fork is active on the chain. - #[prost(message, optional, tag="11")] + #[prost(message, optional, tag = "11")] pub max_fee_per_gas: ::core::option::Option, /// MaxPriorityFeePerGas is priority fee per gas the user to pay in extra to the miner on top of the block's /// base fee. /// /// This will is populated only if `TransactionTrace.Type == TRX_TYPE_DYNAMIC_FEE` which is possible only /// if London fork is active on the chain. - #[prost(message, optional, tag="13")] + #[prost(message, optional, tag = "13")] pub max_priority_fee_per_gas: ::core::option::Option, /// meta - #[prost(uint32, tag="20")] + #[prost(uint32, tag = "20")] pub index: u32, - #[prost(bytes="vec", tag="21")] + #[prost(bytes = "vec", tag = "21")] pub hash: ::prost::alloc::vec::Vec, - #[prost(bytes="vec", tag="22")] + #[prost(bytes = "vec", tag = "22")] pub from: ::prost::alloc::vec::Vec, - #[prost(bytes="vec", tag="23")] + #[prost(bytes = "vec", tag = "23")] pub return_data: ::prost::alloc::vec::Vec, - #[prost(bytes="vec", tag="24")] + #[prost(bytes = "vec", tag = "24")] pub public_key: ::prost::alloc::vec::Vec, - #[prost(uint64, tag="25")] + #[prost(uint64, tag = "25")] pub begin_ordinal: u64, - #[prost(uint64, tag="26")] + #[prost(uint64, tag = "26")] pub end_ordinal: u64, - #[prost(enumeration="TransactionTraceStatus", tag="30")] + #[prost(enumeration = "TransactionTraceStatus", tag = "30")] pub status: i32, - #[prost(message, optional, tag="31")] + #[prost(message, optional, tag = "31")] pub receipt: ::core::option::Option, - #[prost(message, repeated, tag="32")] + #[prost(message, repeated, tag = "32")] pub calls: ::prost::alloc::vec::Vec, } /// Nested message and enum types in `TransactionTrace`. pub mod transaction_trace { - #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] + #[derive( + Clone, + Copy, + Debug, + PartialEq, + Eq, + Hash, + PartialOrd, + Ord, + ::prost::Enumeration + )] #[repr(i32)] pub enum Type { /// All transactions that ever existed prior Berlin fork before EIP-2718 was implemented. @@ -263,24 +282,49 @@ pub mod transaction_trace { /// executed against EIP-1559 rules which dictates a dynamic gas cost based on the congestion of the network. TrxTypeDynamicFee = 2, } + impl Type { + /// String value of the enum field names used in the ProtoBuf definition. 
+ /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Type::TrxTypeLegacy => "TRX_TYPE_LEGACY", + Type::TrxTypeAccessList => "TRX_TYPE_ACCESS_LIST", + Type::TrxTypeDynamicFee => "TRX_TYPE_DYNAMIC_FEE", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "TRX_TYPE_LEGACY" => Some(Self::TrxTypeLegacy), + "TRX_TYPE_ACCESS_LIST" => Some(Self::TrxTypeAccessList), + "TRX_TYPE_DYNAMIC_FEE" => Some(Self::TrxTypeDynamicFee), + _ => None, + } + } + } } /// AccessTuple represents a list of storage keys for a given contract's address and is used /// for AccessList construction. +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct AccessTuple { - #[prost(bytes="vec", tag="1")] + #[prost(bytes = "vec", tag = "1")] pub address: ::prost::alloc::vec::Vec, - #[prost(bytes="vec", repeated, tag="2")] + #[prost(bytes = "vec", repeated, tag = "2")] pub storage_keys: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec>, } /// TransactionTraceWithBlockRef +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct TransactionTraceWithBlockRef { - #[prost(message, optional, tag="1")] + #[prost(message, optional, tag = "1")] pub trace: ::core::option::Option, - #[prost(message, optional, tag="2")] + #[prost(message, optional, tag = "2")] pub block_ref: ::core::option::Option, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct TransactionReceipt { /// State root is an intermediate state_root hash, computed in-between transactions to make @@ -296,27 +340,28 @@ pub struct TransactionReceipt { /// field, following `EIP-658`. /// /// Before Byzantinium hard fork, this field is always empty. - #[prost(bytes="vec", tag="1")] + #[prost(bytes = "vec", tag = "1")] pub state_root: ::prost::alloc::vec::Vec, - #[prost(uint64, tag="2")] + #[prost(uint64, tag = "2")] pub cumulative_gas_used: u64, - #[prost(bytes="vec", tag="3")] + #[prost(bytes = "vec", tag = "3")] pub logs_bloom: ::prost::alloc::vec::Vec, - #[prost(message, repeated, tag="4")] + #[prost(message, repeated, tag = "4")] pub logs: ::prost::alloc::vec::Vec, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Log { - #[prost(bytes="vec", tag="1")] + #[prost(bytes = "vec", tag = "1")] pub address: ::prost::alloc::vec::Vec, - #[prost(bytes="vec", repeated, tag="2")] + #[prost(bytes = "vec", repeated, tag = "2")] pub topics: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec>, - #[prost(bytes="vec", tag="3")] + #[prost(bytes = "vec", tag = "3")] pub data: ::prost::alloc::vec::Vec, /// Index is the index of the log relative to the transaction. This index /// is always populated regardless of the state revertion of the the call /// that emitted this log. - #[prost(uint32, tag="4")] + #[prost(uint32, tag = "4")] pub index: u32, /// BlockIndex represents the index of the log relative to the Block. /// @@ -333,53 +378,57 @@ pub struct Log { /// /// In the case of `calls` case, for `call` where `stateReverted == true`, /// the `blockIndex` value will always be 0. 
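Editor's aside: every enum in this generated file gains the same pair of name helpers (`as_str_name`/`from_str_name`, first added above for `transaction_trace::Type`; the same pattern repeats below for `Reason`, `TransactionTraceStatus`, and `CallType`). A round-trip sketch, assuming it runs in a module where the generated `transaction_trace` module is in scope:

```rust
// Round-trip sketch for the generated name helpers; names other than the
// generated ones are illustrative.
fn round_trip_demo() {
    use transaction_trace::Type;

    let t = Type::TrxTypeDynamicFee;
    // `as_str_name` yields the ProtoBuf identifier unchanged...
    assert_eq!(t.as_str_name(), "TRX_TYPE_DYNAMIC_FEE");
    // ...and `from_str_name` is its inverse, returning None for unknown
    // names ("TRX_TYPE_BLOB" is a made-up example, not a real variant).
    assert_eq!(Type::from_str_name("TRX_TYPE_DYNAMIC_FEE"), Some(t));
    assert_eq!(Type::from_str_name("TRX_TYPE_BLOB"), None);
}
```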
- #[prost(uint32, tag="6")] + #[prost(uint32, tag = "6")] pub block_index: u32, - #[prost(uint64, tag="7")] + #[prost(uint64, tag = "7")] pub ordinal: u64, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Call { - #[prost(uint32, tag="1")] + #[prost(uint32, tag = "1")] pub index: u32, - #[prost(uint32, tag="2")] + #[prost(uint32, tag = "2")] pub parent_index: u32, - #[prost(uint32, tag="3")] + #[prost(uint32, tag = "3")] pub depth: u32, - #[prost(enumeration="CallType", tag="4")] + #[prost(enumeration = "CallType", tag = "4")] pub call_type: i32, - #[prost(bytes="vec", tag="5")] + #[prost(bytes = "vec", tag = "5")] pub caller: ::prost::alloc::vec::Vec, - #[prost(bytes="vec", tag="6")] + #[prost(bytes = "vec", tag = "6")] pub address: ::prost::alloc::vec::Vec, - #[prost(message, optional, tag="7")] + #[prost(message, optional, tag = "7")] pub value: ::core::option::Option, - #[prost(uint64, tag="8")] + #[prost(uint64, tag = "8")] pub gas_limit: u64, - #[prost(uint64, tag="9")] + #[prost(uint64, tag = "9")] pub gas_consumed: u64, - #[prost(bytes="vec", tag="13")] + #[prost(bytes = "vec", tag = "13")] pub return_data: ::prost::alloc::vec::Vec, - #[prost(bytes="vec", tag="14")] + #[prost(bytes = "vec", tag = "14")] pub input: ::prost::alloc::vec::Vec, - #[prost(bool, tag="15")] + #[prost(bool, tag = "15")] pub executed_code: bool, - #[prost(bool, tag="16")] + #[prost(bool, tag = "16")] pub suicide: bool, - /// hex representation of the hash -> preimage - #[prost(map="string, string", tag="20")] - pub keccak_preimages: ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>, - #[prost(message, repeated, tag="21")] + /// hex representation of the hash -> preimage + #[prost(map = "string, string", tag = "20")] + pub keccak_preimages: ::std::collections::HashMap< + ::prost::alloc::string::String, + ::prost::alloc::string::String, + >, + #[prost(message, repeated, tag = "21")] pub storage_changes: ::prost::alloc::vec::Vec, - #[prost(message, repeated, tag="22")] + #[prost(message, repeated, tag = "22")] pub balance_changes: ::prost::alloc::vec::Vec, - #[prost(message, repeated, tag="24")] + #[prost(message, repeated, tag = "24")] pub nonce_changes: ::prost::alloc::vec::Vec, - #[prost(message, repeated, tag="25")] + #[prost(message, repeated, tag = "25")] pub logs: ::prost::alloc::vec::Vec, - #[prost(message, repeated, tag="26")] + #[prost(message, repeated, tag = "26")] pub code_changes: ::prost::alloc::vec::Vec, - #[prost(message, repeated, tag="28")] + #[prost(message, repeated, tag = "28")] pub gas_changes: ::prost::alloc::vec::Vec, /// In Ethereum, a call can be either: /// - Successfull, execution passes without any problem encountered @@ -389,13 +438,13 @@ pub struct Call { /// When a call is either `failed` or `reverted`, the `status_failed` field /// below is set to `true`. If the status is `reverted`, then both `status_failed` /// and `status_reverted` are going to be set to `true`. - #[prost(bool, tag="10")] + #[prost(bool, tag = "10")] pub status_failed: bool, - #[prost(bool, tag="12")] + #[prost(bool, tag = "12")] pub status_reverted: bool, /// Populated when a call either failed or reverted, so when `status_failed == true`, /// see above for details about those flags. 
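Editor's aside: the `status_failed`/`status_reverted` documentation above describes three legal flag combinations (succeeded; failed without a revert; reverted, which implies failed). A hedged classifier for those combinations; the `CallStatus` enum is hypothetical, introduced only for this sketch:

```rust
// Hypothetical illustration enum; the generated file models status as two bools.
enum CallStatus {
    Succeeded,
    Failed,   // failed without an explicit revert
    Reverted, // per the docs, reverted implies status_failed == true as well
}

fn classify(status_failed: bool, status_reverted: bool) -> CallStatus {
    match (status_failed, status_reverted) {
        // The docs imply `status_reverted` never appears without `status_failed`.
        (false, _) => CallStatus::Succeeded,
        (true, false) => CallStatus::Failed,
        (true, true) => CallStatus::Reverted,
    }
}
```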
- #[prost(string, tag="11")] + #[prost(string, tag = "11")] pub failure_reason: ::prost::alloc::string::String, /// This field represents wheter or not the state changes performed /// by this call were correctly recorded by the blockchain. @@ -408,49 +457,51 @@ pub struct Call { /// changes it performed. /// /// ```text - /// Trx 1 - /// Call #1 - /// Call #2 - /// Call #3 - /// |--- Failure here - /// Call #4 + /// Trx 1 + /// Call #1 + /// Call #2 + /// Call #3 + /// |--- Failure here + /// Call #4 /// ``` /// /// In the transaction above, while Call #2 and Call #3 would have the /// status `EXECUTED` - #[prost(bool, tag="30")] + #[prost(bool, tag = "30")] pub state_reverted: bool, - #[prost(uint64, tag="31")] + #[prost(uint64, tag = "31")] pub begin_ordinal: u64, - #[prost(uint64, tag="32")] + #[prost(uint64, tag = "32")] pub end_ordinal: u64, - #[prost(message, repeated, tag="33")] + #[prost(message, repeated, tag = "33")] pub account_creations: ::prost::alloc::vec::Vec, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct StorageChange { - #[prost(bytes="vec", tag="1")] + #[prost(bytes = "vec", tag = "1")] pub address: ::prost::alloc::vec::Vec, - #[prost(bytes="vec", tag="2")] + #[prost(bytes = "vec", tag = "2")] pub key: ::prost::alloc::vec::Vec, - #[prost(bytes="vec", tag="3")] + #[prost(bytes = "vec", tag = "3")] pub old_value: ::prost::alloc::vec::Vec, - #[prost(bytes="vec", tag="4")] + #[prost(bytes = "vec", tag = "4")] pub new_value: ::prost::alloc::vec::Vec, - #[prost(uint64, tag="5")] + #[prost(uint64, tag = "5")] pub ordinal: u64, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct BalanceChange { - #[prost(bytes="vec", tag="1")] + #[prost(bytes = "vec", tag = "1")] pub address: ::prost::alloc::vec::Vec, - #[prost(message, optional, tag="2")] + #[prost(message, optional, tag = "2")] pub old_value: ::core::option::Option, - #[prost(message, optional, tag="3")] + #[prost(message, optional, tag = "3")] pub new_value: ::core::option::Option, - #[prost(enumeration="balance_change::Reason", tag="4")] + #[prost(enumeration = "balance_change::Reason", tag = "4")] pub reason: i32, - #[prost(uint64, tag="5")] + #[prost(uint64, tag = "5")] pub ordinal: u64, } /// Nested message and enum types in `BalanceChange`. @@ -460,7 +511,17 @@ pub mod balance_change { /// ```shell /// ack -ho 'BalanceChangeReason\(".*"\)' | grep -Eo '".*"' | sort | uniq /// ``` - #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] + #[derive( + Clone, + Copy, + Debug, + PartialEq, + Eq, + Hash, + PartialOrd, + Ord, + ::prost::Enumeration + )] #[repr(i32)] pub enum Reason { Unknown = 0, @@ -481,38 +542,89 @@ pub mod balance_change { /// Used on chain(s) where some Ether burning happens Burn = 15, } + impl Reason { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. 
+ pub fn as_str_name(&self) -> &'static str { + match self { + Reason::Unknown => "REASON_UNKNOWN", + Reason::RewardMineUncle => "REASON_REWARD_MINE_UNCLE", + Reason::RewardMineBlock => "REASON_REWARD_MINE_BLOCK", + Reason::DaoRefundContract => "REASON_DAO_REFUND_CONTRACT", + Reason::DaoAdjustBalance => "REASON_DAO_ADJUST_BALANCE", + Reason::Transfer => "REASON_TRANSFER", + Reason::GenesisBalance => "REASON_GENESIS_BALANCE", + Reason::GasBuy => "REASON_GAS_BUY", + Reason::RewardTransactionFee => "REASON_REWARD_TRANSACTION_FEE", + Reason::RewardFeeReset => "REASON_REWARD_FEE_RESET", + Reason::GasRefund => "REASON_GAS_REFUND", + Reason::TouchAccount => "REASON_TOUCH_ACCOUNT", + Reason::SuicideRefund => "REASON_SUICIDE_REFUND", + Reason::SuicideWithdraw => "REASON_SUICIDE_WITHDRAW", + Reason::CallBalanceOverride => "REASON_CALL_BALANCE_OVERRIDE", + Reason::Burn => "REASON_BURN", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "REASON_UNKNOWN" => Some(Self::Unknown), + "REASON_REWARD_MINE_UNCLE" => Some(Self::RewardMineUncle), + "REASON_REWARD_MINE_BLOCK" => Some(Self::RewardMineBlock), + "REASON_DAO_REFUND_CONTRACT" => Some(Self::DaoRefundContract), + "REASON_DAO_ADJUST_BALANCE" => Some(Self::DaoAdjustBalance), + "REASON_TRANSFER" => Some(Self::Transfer), + "REASON_GENESIS_BALANCE" => Some(Self::GenesisBalance), + "REASON_GAS_BUY" => Some(Self::GasBuy), + "REASON_REWARD_TRANSACTION_FEE" => Some(Self::RewardTransactionFee), + "REASON_REWARD_FEE_RESET" => Some(Self::RewardFeeReset), + "REASON_GAS_REFUND" => Some(Self::GasRefund), + "REASON_TOUCH_ACCOUNT" => Some(Self::TouchAccount), + "REASON_SUICIDE_REFUND" => Some(Self::SuicideRefund), + "REASON_SUICIDE_WITHDRAW" => Some(Self::SuicideWithdraw), + "REASON_CALL_BALANCE_OVERRIDE" => Some(Self::CallBalanceOverride), + "REASON_BURN" => Some(Self::Burn), + _ => None, + } + } + } } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct NonceChange { - #[prost(bytes="vec", tag="1")] + #[prost(bytes = "vec", tag = "1")] pub address: ::prost::alloc::vec::Vec, - #[prost(uint64, tag="2")] + #[prost(uint64, tag = "2")] pub old_value: u64, - #[prost(uint64, tag="3")] + #[prost(uint64, tag = "3")] pub new_value: u64, - #[prost(uint64, tag="4")] + #[prost(uint64, tag = "4")] pub ordinal: u64, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct AccountCreation { - #[prost(bytes="vec", tag="1")] + #[prost(bytes = "vec", tag = "1")] pub account: ::prost::alloc::vec::Vec, - #[prost(uint64, tag="2")] + #[prost(uint64, tag = "2")] pub ordinal: u64, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct CodeChange { - #[prost(bytes="vec", tag="1")] + #[prost(bytes = "vec", tag = "1")] pub address: ::prost::alloc::vec::Vec, - #[prost(bytes="vec", tag="2")] + #[prost(bytes = "vec", tag = "2")] pub old_hash: ::prost::alloc::vec::Vec, - #[prost(bytes="vec", tag="3")] + #[prost(bytes = "vec", tag = "3")] pub old_code: ::prost::alloc::vec::Vec, - #[prost(bytes="vec", tag="4")] + #[prost(bytes = "vec", tag = "4")] pub new_hash: ::prost::alloc::vec::Vec, - #[prost(bytes="vec", tag="5")] + #[prost(bytes = "vec", tag = "5")] pub new_code: ::prost::alloc::vec::Vec, - #[prost(uint64, tag="6")] + #[prost(uint64, tag = "6")] pub ordinal: u64, } /// The gas change model represents the reason why some gas cost has 
occurred. @@ -521,15 +633,16 @@ pub struct CodeChange { /// /// Hence, we only index some of them, those that are costy like all the calls /// one, log events, return data, etc. +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct GasChange { - #[prost(uint64, tag="1")] + #[prost(uint64, tag = "1")] pub old_value: u64, - #[prost(uint64, tag="2")] + #[prost(uint64, tag = "2")] pub new_value: u64, - #[prost(enumeration="gas_change::Reason", tag="3")] + #[prost(enumeration = "gas_change::Reason", tag = "3")] pub reason: i32, - #[prost(uint64, tag="4")] + #[prost(uint64, tag = "4")] pub ordinal: u64, } /// Nested message and enum types in `GasChange`. @@ -539,7 +652,17 @@ pub mod gas_change { /// ```shell /// ack -ho 'GasChangeReason\(".*"\)' | grep -Eo '".*"' | sort | uniq /// ``` - #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] + #[derive( + Clone, + Copy, + Debug, + PartialEq, + Eq, + Hash, + PartialOrd, + Ord, + ::prost::Enumeration + )] #[repr(i32)] pub enum Reason { Unknown = 0, @@ -565,6 +688,64 @@ pub mod gas_change { /// Added in Berlin fork (Geth 1.10+) StateColdAccess = 20, } + impl Reason { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Reason::Unknown => "REASON_UNKNOWN", + Reason::Call => "REASON_CALL", + Reason::CallCode => "REASON_CALL_CODE", + Reason::CallDataCopy => "REASON_CALL_DATA_COPY", + Reason::CodeCopy => "REASON_CODE_COPY", + Reason::CodeStorage => "REASON_CODE_STORAGE", + Reason::ContractCreation => "REASON_CONTRACT_CREATION", + Reason::ContractCreation2 => "REASON_CONTRACT_CREATION2", + Reason::DelegateCall => "REASON_DELEGATE_CALL", + Reason::EventLog => "REASON_EVENT_LOG", + Reason::ExtCodeCopy => "REASON_EXT_CODE_COPY", + Reason::FailedExecution => "REASON_FAILED_EXECUTION", + Reason::IntrinsicGas => "REASON_INTRINSIC_GAS", + Reason::PrecompiledContract => "REASON_PRECOMPILED_CONTRACT", + Reason::RefundAfterExecution => "REASON_REFUND_AFTER_EXECUTION", + Reason::Return => "REASON_RETURN", + Reason::ReturnDataCopy => "REASON_RETURN_DATA_COPY", + Reason::Revert => "REASON_REVERT", + Reason::SelfDestruct => "REASON_SELF_DESTRUCT", + Reason::StaticCall => "REASON_STATIC_CALL", + Reason::StateColdAccess => "REASON_STATE_COLD_ACCESS", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. 
+ pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "REASON_UNKNOWN" => Some(Self::Unknown), + "REASON_CALL" => Some(Self::Call), + "REASON_CALL_CODE" => Some(Self::CallCode), + "REASON_CALL_DATA_COPY" => Some(Self::CallDataCopy), + "REASON_CODE_COPY" => Some(Self::CodeCopy), + "REASON_CODE_STORAGE" => Some(Self::CodeStorage), + "REASON_CONTRACT_CREATION" => Some(Self::ContractCreation), + "REASON_CONTRACT_CREATION2" => Some(Self::ContractCreation2), + "REASON_DELEGATE_CALL" => Some(Self::DelegateCall), + "REASON_EVENT_LOG" => Some(Self::EventLog), + "REASON_EXT_CODE_COPY" => Some(Self::ExtCodeCopy), + "REASON_FAILED_EXECUTION" => Some(Self::FailedExecution), + "REASON_INTRINSIC_GAS" => Some(Self::IntrinsicGas), + "REASON_PRECOMPILED_CONTRACT" => Some(Self::PrecompiledContract), + "REASON_REFUND_AFTER_EXECUTION" => Some(Self::RefundAfterExecution), + "REASON_RETURN" => Some(Self::Return), + "REASON_RETURN_DATA_COPY" => Some(Self::ReturnDataCopy), + "REASON_REVERT" => Some(Self::Revert), + "REASON_SELF_DESTRUCT" => Some(Self::SelfDestruct), + "REASON_STATIC_CALL" => Some(Self::StaticCall), + "REASON_STATE_COLD_ACCESS" => Some(Self::StateColdAccess), + _ => None, + } + } + } } #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] @@ -574,6 +755,30 @@ pub enum TransactionTraceStatus { Failed = 2, Reverted = 3, } +impl TransactionTraceStatus { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + TransactionTraceStatus::Unknown => "UNKNOWN", + TransactionTraceStatus::Succeeded => "SUCCEEDED", + TransactionTraceStatus::Failed => "FAILED", + TransactionTraceStatus::Reverted => "REVERTED", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "UNKNOWN" => Some(Self::Unknown), + "SUCCEEDED" => Some(Self::Succeeded), + "FAILED" => Some(Self::Failed), + "REVERTED" => Some(Self::Reverted), + _ => None, + } + } +} #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum CallType { @@ -586,3 +791,31 @@ pub enum CallType { /// create2 ? any other form of calls? Create = 5, } +impl CallType { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + CallType::Unspecified => "UNSPECIFIED", + CallType::Call => "CALL", + CallType::Callcode => "CALLCODE", + CallType::Delegate => "DELEGATE", + CallType::Static => "STATIC", + CallType::Create => "CREATE", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. 
+ pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "UNSPECIFIED" => Some(Self::Unspecified), + "CALL" => Some(Self::Call), + "CALLCODE" => Some(Self::Callcode), + "DELEGATE" => Some(Self::Delegate), + "STATIC" => Some(Self::Static), + "CREATE" => Some(Self::Create), + _ => None, + } + } +} diff --git a/chain/ethereum/src/runtime/runtime_adapter.rs b/chain/ethereum/src/runtime/runtime_adapter.rs index e3f4a17198a..f5cac662cde 100644 --- a/chain/ethereum/src/runtime/runtime_adapter.rs +++ b/chain/ethereum/src/runtime/runtime_adapter.rs @@ -43,13 +43,10 @@ impl blockchain::RuntimeAdapter for RuntimeAdapter { fn host_fns(&self, ds: &DataSource) -> Result, Error> { let abis = ds.mapping.abis.clone(); let call_cache = self.call_cache.cheap_clone(); - let eth_adapter = self - .eth_adapters - .cheapest_with(&NodeCapabilities { - archive: ds.mapping.requires_archive()?, - traces: false, - })? - .cheap_clone(); + let eth_adapter = self.eth_adapters.call_or_cheapest(Some(&NodeCapabilities { + archive: ds.mapping.requires_archive()?, + traces: false, + }))?; let ethereum_call = HostFn { name: "ethereum.call", diff --git a/chain/ethereum/src/tests.rs b/chain/ethereum/src/tests.rs index eee594fdbc4..9c4a46130e5 100644 --- a/chain/ethereum/src/tests.rs +++ b/chain/ethereum/src/tests.rs @@ -4,8 +4,9 @@ use graph::{ blockchain::{block_stream::BlockWithTriggers, BlockPtr}, prelude::{ web3::types::{Address, Bytes, Log, H160, H256, U64}, - EthereumCall, + EthereumCall, LightEthereumBlock, }, + slog::{self, o, Logger}, }; use crate::{ @@ -31,6 +32,7 @@ fn test_trigger_ordering() { let mut call2 = EthereumCall::default(); call2.transaction_index = 2; + call2.input = Bytes(vec![0]); let call2 = EthereumTrigger::Call(Arc::new(call2)); let mut call3 = EthereumCall::default(); @@ -40,6 +42,8 @@ fn test_trigger_ordering() { // Call with the same tx index as call2 let mut call4 = EthereumCall::default(); call4.transaction_index = 2; + // different than call2 so they don't get mistaken as the same + call4.input = Bytes(vec![1]); let call4 = EthereumTrigger::Call(Arc::new(call4)); fn create_log(tx_index: u64, log_index: u64) -> Arc { @@ -86,12 +90,121 @@ fn test_trigger_ordering() { log1.clone(), ]; + let logger = Logger::root(slog::Discard, o!()); + + let mut b: LightEthereumBlock = Default::default(); + + // This is necessary because inside of BlockWithTriggers::new + // there's a log for both fields. So just using Default above + // gives None on them. + b.number = Some(Default::default()); + b.hash = Some(Default::default()); + // Test that `BlockWithTriggers` sorts the triggers. 
- let block_with_triggers = - BlockWithTriggers::::new(BlockFinality::Final(Default::default()), triggers); + let block_with_triggers = BlockWithTriggers::::new( + BlockFinality::Final(Arc::new(b)), + triggers, + &logger, + ); assert_eq!( block_with_triggers.trigger_data, vec![log1, log2, call1, log3, call2, call4, call3, block2, block1] ); } + +#[test] +fn test_trigger_dedup() { + let block1 = EthereumTrigger::Block( + BlockPtr::from((H256::random(), 1u64)), + EthereumBlockTriggerType::Every, + ); + + let block2 = EthereumTrigger::Block( + BlockPtr::from((H256::random(), 0u64)), + EthereumBlockTriggerType::WithCallTo(Address::random()), + ); + + // duplicate block2 + let block3 = block2.clone(); + + let mut call1 = EthereumCall::default(); + call1.transaction_index = 1; + let call1 = EthereumTrigger::Call(Arc::new(call1)); + + let mut call2 = EthereumCall::default(); + call2.transaction_index = 2; + let call2 = EthereumTrigger::Call(Arc::new(call2)); + + let mut call3 = EthereumCall::default(); + call3.transaction_index = 3; + let call3 = EthereumTrigger::Call(Arc::new(call3)); + + // duplicate call2 + let mut call4 = EthereumCall::default(); + call4.transaction_index = 2; + let call4 = EthereumTrigger::Call(Arc::new(call4)); + + fn create_log(tx_index: u64, log_index: u64) -> Arc { + Arc::new(Log { + address: H160::default(), + topics: vec![], + data: Bytes::default(), + block_hash: Some(H256::zero()), + block_number: Some(U64::zero()), + transaction_hash: Some(H256::zero()), + transaction_index: Some(tx_index.into()), + log_index: Some(log_index.into()), + transaction_log_index: Some(log_index.into()), + log_type: Some("".into()), + removed: Some(false), + }) + } + + let log1 = EthereumTrigger::Log(create_log(1, 0), None); + let log2 = EthereumTrigger::Log(create_log(1, 1), None); + let log3 = EthereumTrigger::Log(create_log(2, 5), None); + // duplicate logs 2 and 3 + let log4 = log2.clone(); + let log5 = log3.clone(); + + let triggers = vec![ + // Call triggers + call3.clone(), + call1.clone(), + call2.clone(), + call4, + // Block triggers + block3, + block2.clone(), + block1.clone(), + // Event triggers + log5, + log4, + log3.clone(), + log2.clone(), + log1.clone(), + ]; + + let logger = Logger::root(slog::Discard, o!()); + + let mut b: LightEthereumBlock = Default::default(); + + // This is necessary because inside of BlockWithTriggers::new + // there's a log for both fields. So just using Default above + // gives None on them. + b.number = Some(Default::default()); + b.hash = Some(Default::default()); + + // Test that `BlockWithTriggers` sorts the triggers. 
+ let block_with_triggers = BlockWithTriggers::::new( + BlockFinality::Final(Arc::new(b)), + triggers, + &logger, + ); + + assert_eq!( + block_with_triggers.trigger_data, + vec![log1, log2, call1, log3, call2, call3, block2, block1] + ); +} diff --git a/chain/ethereum/src/transport.rs b/chain/ethereum/src/transport.rs index 2d4302ceb56..b30fd17d84b 100644 --- a/chain/ethereum/src/transport.rs +++ b/chain/ethereum/src/transport.rs @@ -22,7 +22,7 @@ impl Transport { pub async fn new_ipc(ipc: &str) -> Self { ipc::Ipc::new(ipc) .await - .map(|transport| Transport::IPC(transport)) + .map(Transport::IPC) .expect("Failed to connect to Ethereum IPC") } @@ -30,7 +30,7 @@ impl Transport { pub async fn new_ws(ws: &str) -> Self { ws::WebSocket::new(ws) .await - .map(|transport| Transport::WS(transport)) + .map(Transport::WS) .expect("Failed to connect to Ethereum WS") } diff --git a/chain/ethereum/src/trigger.rs b/chain/ethereum/src/trigger.rs index 6aaae1a65b3..9b609668b1f 100644 --- a/chain/ethereum/src/trigger.rs +++ b/chain/ethereum/src/trigger.rs @@ -404,13 +404,13 @@ impl From<&'_ Transaction> for EthereumTransactionData { gas_limit: tx.gas, gas_price: tx.gas_price.unwrap_or(U256::zero()), // EIP-1559 made this optional. input: tx.input.0.clone(), - nonce: tx.nonce.clone(), + nonce: tx.nonce, } } } /// An Ethereum event logged from a specific contract address and block. -#[derive(Debug)] +#[derive(Debug, Clone)] pub struct EthereumEventData { pub address: Address, pub log_index: U256, @@ -421,29 +421,8 @@ pub struct EthereumEventData { pub params: Vec, } -impl Clone for EthereumEventData { - fn clone(&self) -> Self { - EthereumEventData { - address: self.address, - log_index: self.log_index, - transaction_log_index: self.transaction_log_index, - log_type: self.log_type.clone(), - block: self.block.clone(), - transaction: self.transaction.clone(), - params: self - .params - .iter() - .map(|log_param| LogParam { - name: log_param.name.clone(), - value: log_param.value.clone(), - }) - .collect(), - } - } -} - /// An Ethereum call executed within a transaction within a block to a contract address. 
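Editor's aside on the two `chain/ethereum/src/tests.rs` cases above: `BlockWithTriggers::new` now takes a logger and, per the dedup test, also drops exact duplicates (`call4`, `block3`, `log4`, and `log5` are absent from the expected vector), which is why the ordering test gives `call2` and `call4` distinct `input` bytes. A minimal sketch of that sort-and-dedup step, assuming `EthereumTrigger` implements `Ord` and `PartialEq` as the tests imply:

```rust
// Hedged sketch only; the real logic lives in BlockWithTriggers::new and
// may interleave logging and other bookkeeping.
fn sort_and_dedup(mut triggers: Vec<EthereumTrigger>) -> Vec<EthereumTrigger> {
    triggers.sort();  // position order: logs/calls by tx and log index, blocks last
    triggers.dedup(); // adjacent exact duplicates collapse to one
    triggers
}
```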
-#[derive(Debug)] +#[derive(Debug, Clone)] pub struct EthereumCallData { pub from: Address, pub to: Address, @@ -452,30 +431,3 @@ pub struct EthereumCallData { pub inputs: Vec, pub outputs: Vec, } - -impl Clone for EthereumCallData { - fn clone(&self) -> Self { - EthereumCallData { - to: self.to, - from: self.from, - block: self.block.clone(), - transaction: self.transaction.clone(), - inputs: self - .inputs - .iter() - .map(|log_param| LogParam { - name: log_param.name.clone(), - value: log_param.value.clone(), - }) - .collect(), - outputs: self - .outputs - .iter() - .map(|log_param| LogParam { - name: log_param.name.clone(), - value: log_param.value.clone(), - }) - .collect(), - } - } -} diff --git a/chain/ethereum/tests/manifest.rs b/chain/ethereum/tests/manifest.rs index 2c82f1ff657..d99d36ce938 100644 --- a/chain/ethereum/tests/manifest.rs +++ b/chain/ethereum/tests/manifest.rs @@ -36,10 +36,8 @@ struct TextResolver { impl TextResolver { fn add(&mut self, link: &str, text: &impl AsRef<[u8]>) { - self.texts.insert( - link.to_owned(), - text.as_ref().into_iter().cloned().collect(), - ); + self.texts + .insert(link.to_owned(), text.as_ref().iter().cloned().collect()); } } @@ -149,7 +147,7 @@ templates: specVersion: 0.0.7 "; - let manifest = resolve_manifest(&yaml, SPEC_VERSION_0_0_7).await; + let manifest = resolve_manifest(yaml, SPEC_VERSION_0_0_7).await; assert_eq!("Qmmanifest", manifest.id.as_str()); assert_eq!(manifest.data_sources.len(), 0); @@ -237,13 +235,12 @@ specVersion: 0.0.2 // Meaning that the graft will fail just like it's parent // but it started at a valid previous block. assert!( - unvalidated + !unvalidated .validate(subgraph_store.clone(), true) .await .expect_err("Validation must fail") .into_iter() - .find(|e| matches!(e, SubgraphManifestValidationError::GraftBaseInvalid(_))) - .is_none(), + .any(|e| matches!(&e, SubgraphManifestValidationError::GraftBaseInvalid(_))), "There shouldn't be a GraftBaseInvalid error" ); diff --git a/chain/near/Cargo.toml b/chain/near/Cargo.toml index e61b5609477..d41b901159e 100644 --- a/chain/near/Cargo.toml +++ b/chain/near/Cargo.toml @@ -1,16 +1,16 @@ [package] name = "graph-chain-near" -version = "0.27.0" -edition = "2021" +version.workspace = true +edition.workspace = true [build-dependencies] -tonic-build = { version = "0.7.1", features = ["prost"] } +tonic-build = { workspace = true } [dependencies] -base64 = "0.13" +base64 = "0.20" graph = { path = "../../graph" } -prost = "0.10.1" -prost-types = "0.10.1" +prost = { workspace = true } +prost-types = { workspace = true } serde = "1.0" graph-runtime-wasm = { path = "../../runtime/wasm" } diff --git a/chain/near/src/adapter.rs b/chain/near/src/adapter.rs index 1b3969251a7..89c95b20c28 100644 --- a/chain/near/src/adapter.rs +++ b/chain/near/src/adapter.rs @@ -1,6 +1,5 @@ use std::collections::HashSet; -use crate::capabilities::NodeCapabilities; use crate::data_source::PartialAccounts; use crate::{data_source::DataSource, Chain}; use graph::blockchain as bc; @@ -29,8 +28,8 @@ impl bc::TriggerFilter for TriggerFilter { receipt_filter.extend(NearReceiptFilter::from_data_sources(data_sources)); } - fn node_capabilities(&self) -> NodeCapabilities { - NodeCapabilities {} + fn node_capabilities(&self) -> bc::EmptyNodeCapabilities { + bc::EmptyNodeCapabilities::default() } fn extend_with_template( @@ -147,7 +146,7 @@ impl NearReceiptFilter { let partial_accounts: Vec<(Option, Option)> = sources .iter() .filter(|s| s.partial_accounts.is_some()) - .map(|s| { + .flat_map(|s| { let partials = 
s.partial_accounts.as_ref().unwrap(); let mut pairs: Vec<(Option, Option)> = vec![]; @@ -181,7 +180,6 @@ impl NearReceiptFilter { pairs }) - .flatten() .collect(); Self { diff --git a/chain/near/src/capabilities.rs b/chain/near/src/capabilities.rs deleted file mode 100644 index 0d84c9c555d..00000000000 --- a/chain/near/src/capabilities.rs +++ /dev/null @@ -1,37 +0,0 @@ -use graph::{anyhow::Error, impl_slog_value}; -use std::cmp::{Ordering, PartialOrd}; -use std::fmt; -use std::str::FromStr; - -use crate::data_source::DataSource; - -#[derive(Clone, Copy, Debug, PartialEq, Eq)] -pub struct NodeCapabilities {} - -impl PartialOrd for NodeCapabilities { - fn partial_cmp(&self, _other: &Self) -> Option { - None - } -} - -impl FromStr for NodeCapabilities { - type Err = Error; - - fn from_str(_s: &str) -> Result { - Ok(NodeCapabilities {}) - } -} - -impl fmt::Display for NodeCapabilities { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.write_str("near") - } -} - -impl_slog_value!(NodeCapabilities, "{}"); - -impl graph::blockchain::NodeCapabilities for NodeCapabilities { - fn from_data_sources(_data_sources: &[DataSource]) -> Self { - NodeCapabilities {} - } -} diff --git a/chain/near/src/chain.rs b/chain/near/src/chain.rs index 226a915f4c3..16551ef6df7 100644 --- a/chain/near/src/chain.rs +++ b/chain/near/src/chain.rs @@ -4,7 +4,6 @@ use graph::data::subgraph::UnifiedMappingApiVersion; use graph::firehose::{FirehoseEndpoint, FirehoseEndpoints}; use graph::prelude::{MetricsRegistry, TryFutureExt}; use graph::{ - anyhow, anyhow::Result, blockchain::{ block_stream::{ @@ -12,7 +11,8 @@ use graph::{ FirehoseMapper as FirehoseMapperTrait, TriggersAdapter as TriggersAdapterTrait, }, firehose_block_stream::FirehoseBlockStream, - BlockHash, BlockPtr, Blockchain, IngestorError, RuntimeAdapter as RuntimeAdapterTrait, + BlockHash, BlockPtr, Blockchain, EmptyNodeCapabilities, IngestorError, + RuntimeAdapter as RuntimeAdapterTrait, }, components::store::DeploymentLocator, firehose::{self as firehose, ForkStep}, @@ -22,7 +22,6 @@ use prost::Message; use std::sync::Arc; use crate::adapter::TriggerFilter; -use crate::capabilities::NodeCapabilities; use crate::data_source::{DataSourceTemplate, UnresolvedDataSourceTemplate}; use crate::runtime::RuntimeAdapter; use crate::trigger::{self, NearTrigger}; @@ -47,22 +46,21 @@ impl BlockStreamBuilder for NearStreamBuilder { unified_api_version: UnifiedMappingApiVersion, ) -> Result>> { let adapter = chain - .triggers_adapter(&deployment, &NodeCapabilities {}, unified_api_version) - .expect(&format!("no adapter for network {}", chain.name,)); + .triggers_adapter( + &deployment, + &EmptyNodeCapabilities::default(), + unified_api_version, + ) + .unwrap_or_else(|_| panic!("no adapter for network {}", chain.name)); - let firehose_endpoint = match chain.firehose_endpoints.random() { - Some(e) => e.clone(), - None => return Err(anyhow::format_err!("no firehose endpoint available")), - }; + let firehose_endpoint = chain.firehose_endpoints.random()?; let logger = chain .logger_factory .subgraph_logger(&deployment) .new(o!("component" => "FirehoseBlockStream")); - let firehose_mapper = Arc::new(FirehoseMapper { - endpoint: firehose_endpoint.cheap_clone(), - }); + let firehose_mapper = Arc::new(FirehoseMapper {}); Ok(Box::new(FirehoseBlockStream::new( deployment.hash, @@ -146,7 +144,7 @@ impl Blockchain for Chain { type TriggerFilter = crate::adapter::TriggerFilter; - type NodeCapabilities = crate::capabilities::NodeCapabilities; + type NodeCapabilities = 
EmptyNodeCapabilities; fn triggers_adapter( &self, @@ -180,6 +178,18 @@ impl Blockchain for Chain { .await } + fn is_refetch_block_required(&self) -> bool { + false + } + + async fn refetch_firehose_block( + &self, + _logger: &Logger, + _cursor: FirehoseCursor, + ) -> Result { + unimplemented!("This chain does not support Dynamic Data Sources. is_refetch_block_required always returns false, this shouldn't be called.") + } + async fn new_polling_block_stream( &self, _deployment: DeploymentLocator, @@ -200,10 +210,7 @@ impl Blockchain for Chain { logger: &Logger, number: BlockNumber, ) -> Result { - let firehose_endpoint = match self.firehose_endpoints.random() { - Some(e) => e.clone(), - None => return Err(anyhow::format_err!("no firehose endpoint available").into()), - }; + let firehose_endpoint = self.firehose_endpoints.random()?; firehose_endpoint .block_ptr_for_number::(logger, number) @@ -235,7 +242,7 @@ impl TriggersAdapterTrait for TriggersAdapter { async fn triggers_in_block( &self, - _logger: &Logger, + logger: &Logger, block: codec::Block, filter: &TriggerFilter, ) -> Result, Error> { @@ -292,7 +299,7 @@ impl TriggersAdapterTrait for TriggersAdapter { trigger_data.push(NearTrigger::Block(shared_block.cheap_clone())); } - Ok(BlockWithTriggers::new(block, trigger_data)) + Ok(BlockWithTriggers::new(block, trigger_data, logger)) } async fn is_on_main_chain(&self, _ptr: BlockPtr) -> Result { @@ -318,9 +325,7 @@ impl TriggersAdapterTrait for TriggersAdapter { } } -pub struct FirehoseMapper { - endpoint: Arc, -} +pub struct FirehoseMapper {} #[async_trait] impl FirehoseMapperTrait for FirehoseMapper { @@ -371,11 +376,11 @@ impl FirehoseMapperTrait for FirehoseMapper { )) } - StepIrreversible => { + StepFinal => { panic!("irreversible step is not handled and should not be requested in the Firehose request") } - StepUnknown => { + StepUnset => { panic!("unknown step should not happen in the Firehose response") } } @@ -384,9 +389,10 @@ impl FirehoseMapperTrait for FirehoseMapper { async fn block_ptr_for_number( &self, logger: &Logger, + endpoint: &Arc, number: BlockNumber, ) -> Result { - self.endpoint + endpoint .block_ptr_for_number::(logger, number) .await } @@ -394,12 +400,12 @@ impl FirehoseMapperTrait for FirehoseMapper { async fn final_block_ptr_for( &self, logger: &Logger, + endpoint: &Arc, block: &codec::Block, ) -> Result { let final_block_number = block.header().last_final_block_height as BlockNumber; - self.endpoint - .block_ptr_for_number::(logger, final_block_number) + self.block_ptr_for_number(logger, endpoint, final_block_number) .await } } @@ -569,7 +575,7 @@ mod test { true, case.expected .iter() - .all(|x| receipt.partial_accounts.contains(&x)), + .all(|x| receipt.partial_accounts.contains(x)), "name: {}\npartial_accounts: {:?}", case.name, receipt.partial_accounts, diff --git a/chain/near/src/data_source.rs b/chain/near/src/data_source.rs index c0fa5c6371e..5a9c1e4373f 100644 --- a/chain/near/src/data_source.rs +++ b/chain/near/src/data_source.rs @@ -11,7 +11,7 @@ use graph::{ }, semver, }; -use std::{convert::TryFrom, sync::Arc}; +use std::sync::Arc; use crate::chain::Chain; use crate::trigger::{NearTrigger, ReceiptWithOutcome}; @@ -31,6 +31,41 @@ pub struct DataSource { } impl blockchain::DataSource for DataSource { + fn from_template_info(_template_info: DataSourceTemplateInfo) -> Result { + Err(anyhow!("Near subgraphs do not support templates")) + + // How this might be implemented if/when Near gets support for templates: + // let DataSourceTemplateInfo { + // 
template, + // params, + // context, + // creation_block, + // } = info; + + // let account = params + // .get(0) + // .with_context(|| { + // format!( + // "Failed to create data source from template `{}`: account parameter is missing", + // template.name + // ) + // })? + // .clone(); + + // Ok(DataSource { + // kind: template.kind, + // network: template.network, + // name: template.name, + // source: Source { + // account, + // start_block: 0, + // }, + // mapping: template.mapping, + // context: Arc::new(context), + // creation_block: Some(creation_block), + // }) + } + fn address(&self) -> Option<&[u8]> { self.source.account.as_ref().map(String::as_bytes) } @@ -118,7 +153,7 @@ impl blockchain::DataSource for DataSource { } fn network(&self) -> Option<&str> { - self.network.as_ref().map(|s| s.as_str()) + self.network.as_deref() } fn context(&self) -> Arc> { @@ -292,45 +327,6 @@ impl blockchain::UnresolvedDataSource for UnresolvedDataSource { } } -impl TryFrom> for DataSource { - type Error = Error; - - fn try_from(_info: DataSourceTemplateInfo) -> Result { - Err(anyhow!("Near subgraphs do not support templates")) - - // How this might be implemented if/when Near gets support for templates: - // let DataSourceTemplateInfo { - // template, - // params, - // context, - // creation_block, - // } = info; - - // let account = params - // .get(0) - // .with_context(|| { - // format!( - // "Failed to create data source from template `{}`: account parameter is missing", - // template.name - // ) - // })? - // .clone(); - - // Ok(DataSource { - // kind: template.kind, - // network: template.network, - // name: template.name, - // source: Source { - // account, - // start_block: 0, - // }, - // mapping: template.mapping, - // context: Arc::new(context), - // creation_block: Some(creation_block), - // }) - } -} - #[derive(Clone, Debug, Default, Hash, Eq, PartialEq, Deserialize)] pub struct BaseDataSourceTemplate { pub kind: String, diff --git a/chain/near/src/lib.rs b/chain/near/src/lib.rs index 2ab7dd83583..c1fe4c8cfa6 100644 --- a/chain/near/src/lib.rs +++ b/chain/near/src/lib.rs @@ -1,5 +1,4 @@ mod adapter; -mod capabilities; mod chain; pub mod codec; mod data_source; diff --git a/chain/near/src/protobuf/sf.near.codec.v1.rs b/chain/near/src/protobuf/sf.near.codec.v1.rs index 4a58d01d0c7..a89d63ae341 100644 --- a/chain/near/src/protobuf/sf.near.codec.v1.rs +++ b/chain/near/src/protobuf/sf.near.codec.v1.rs @@ -1,14 +1,15 @@ +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Block { - #[prost(string, tag="1")] + #[prost(string, tag = "1")] pub author: ::prost::alloc::string::String, - #[prost(message, optional, tag="2")] + #[prost(message, optional, tag = "2")] pub header: ::core::option::Option, - #[prost(message, repeated, tag="3")] + #[prost(message, repeated, tag = "3")] pub chunk_headers: ::prost::alloc::vec::Vec, - #[prost(message, repeated, tag="4")] + #[prost(message, repeated, tag = "4")] pub shards: ::prost::alloc::vec::Vec, - #[prost(message, repeated, tag="5")] + #[prost(message, repeated, tag = "5")] pub state_changes: ::prost::alloc::vec::Vec, } /// HeaderOnlyBlock is a standard \[Block\] structure where all other fields are @@ -17,788 +18,912 @@ pub struct Block { /// /// This can be used to unpack a \[Block\] when only the \[BlockHeader\] information /// is required and greatly reduced required memory. 
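Editor's aside: the doc comment above (for the `HeaderOnlyBlock` message defined immediately below) relies on a standard protobuf property: because the message reuses the full `Block`'s field tag for `header` (tag 2), the same wire bytes decode into either type, with the unknown fields skipped. A minimal sketch using prost's `Message::decode`; `payload` and the function name are illustrative:

```rust
use prost::Message;

// `payload` is assumed to hold a serialized sf.near.codec.v1.Block; decoding
// it as HeaderOnlyBlock materializes only the header and skips the rest.
fn decode_header_only(payload: &[u8]) -> Result<HeaderOnlyBlock, prost::DecodeError> {
    HeaderOnlyBlock::decode(payload)
}
```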
+#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct HeaderOnlyBlock { - #[prost(message, optional, tag="2")] + #[prost(message, optional, tag = "2")] pub header: ::core::option::Option, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct StateChangeWithCause { - #[prost(message, optional, tag="1")] + #[prost(message, optional, tag = "1")] pub value: ::core::option::Option, - #[prost(message, optional, tag="2")] + #[prost(message, optional, tag = "2")] pub cause: ::core::option::Option, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct StateChangeCause { - #[prost(oneof="state_change_cause::Cause", tags="1, 2, 3, 4, 5, 6, 7, 8, 9, 10")] + #[prost(oneof = "state_change_cause::Cause", tags = "1, 2, 3, 4, 5, 6, 7, 8, 9, 10")] pub cause: ::core::option::Option, } /// Nested message and enum types in `StateChangeCause`. pub mod state_change_cause { + #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] - pub struct NotWritableToDisk { - } + pub struct NotWritableToDisk {} + #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] - pub struct InitialState { - } + pub struct InitialState {} + #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct TransactionProcessing { - #[prost(message, optional, tag="1")] + #[prost(message, optional, tag = "1")] pub tx_hash: ::core::option::Option, } + #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ActionReceiptProcessingStarted { - #[prost(message, optional, tag="1")] + #[prost(message, optional, tag = "1")] pub receipt_hash: ::core::option::Option, } + #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ActionReceiptGasReward { - #[prost(message, optional, tag="1")] + #[prost(message, optional, tag = "1")] pub tx_hash: ::core::option::Option, } + #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ReceiptProcessing { - #[prost(message, optional, tag="1")] + #[prost(message, optional, tag = "1")] pub tx_hash: ::core::option::Option, } + #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct PostponedReceipt { - #[prost(message, optional, tag="1")] + #[prost(message, optional, tag = "1")] pub tx_hash: ::core::option::Option, } + #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] - pub struct UpdatedDelayedReceipts { - } + pub struct UpdatedDelayedReceipts {} + #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] - pub struct ValidatorAccountsUpdate { - } + pub struct ValidatorAccountsUpdate {} + #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] - pub struct Migration { - } + pub struct Migration {} + #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum Cause { - #[prost(message, tag="1")] + #[prost(message, tag = "1")] NotWritableToDisk(NotWritableToDisk), - #[prost(message, tag="2")] + #[prost(message, tag = "2")] InitialState(InitialState), - #[prost(message, tag="3")] + #[prost(message, tag = "3")] TransactionProcessing(TransactionProcessing), - #[prost(message, tag="4")] + #[prost(message, tag = 
"4")] ActionReceiptProcessingStarted(ActionReceiptProcessingStarted), - #[prost(message, tag="5")] + #[prost(message, tag = "5")] ActionReceiptGasReward(ActionReceiptGasReward), - #[prost(message, tag="6")] + #[prost(message, tag = "6")] ReceiptProcessing(ReceiptProcessing), - #[prost(message, tag="7")] + #[prost(message, tag = "7")] PostponedReceipt(PostponedReceipt), - #[prost(message, tag="8")] + #[prost(message, tag = "8")] UpdatedDelayedReceipts(UpdatedDelayedReceipts), - #[prost(message, tag="9")] + #[prost(message, tag = "9")] ValidatorAccountsUpdate(ValidatorAccountsUpdate), - #[prost(message, tag="10")] + #[prost(message, tag = "10")] Migration(Migration), } } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct StateChangeValue { - #[prost(oneof="state_change_value::Value", tags="1, 2, 3, 4, 5, 6, 7, 8")] + #[prost(oneof = "state_change_value::Value", tags = "1, 2, 3, 4, 5, 6, 7, 8")] pub value: ::core::option::Option, } /// Nested message and enum types in `StateChangeValue`. pub mod state_change_value { + #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct AccountUpdate { - #[prost(string, tag="1")] + #[prost(string, tag = "1")] pub account_id: ::prost::alloc::string::String, - #[prost(message, optional, tag="2")] + #[prost(message, optional, tag = "2")] pub account: ::core::option::Option, } + #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct AccountDeletion { - #[prost(string, tag="1")] + #[prost(string, tag = "1")] pub account_id: ::prost::alloc::string::String, } + #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct AccessKeyUpdate { - #[prost(string, tag="1")] + #[prost(string, tag = "1")] pub account_id: ::prost::alloc::string::String, - #[prost(message, optional, tag="2")] + #[prost(message, optional, tag = "2")] pub public_key: ::core::option::Option, - #[prost(message, optional, tag="3")] + #[prost(message, optional, tag = "3")] pub access_key: ::core::option::Option, } + #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct AccessKeyDeletion { - #[prost(string, tag="1")] + #[prost(string, tag = "1")] pub account_id: ::prost::alloc::string::String, - #[prost(message, optional, tag="2")] + #[prost(message, optional, tag = "2")] pub public_key: ::core::option::Option, } + #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct DataUpdate { - #[prost(string, tag="1")] + #[prost(string, tag = "1")] pub account_id: ::prost::alloc::string::String, - #[prost(bytes="vec", tag="2")] + #[prost(bytes = "vec", tag = "2")] pub key: ::prost::alloc::vec::Vec, - #[prost(bytes="vec", tag="3")] + #[prost(bytes = "vec", tag = "3")] pub value: ::prost::alloc::vec::Vec, } + #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct DataDeletion { - #[prost(string, tag="1")] + #[prost(string, tag = "1")] pub account_id: ::prost::alloc::string::String, - #[prost(bytes="vec", tag="2")] + #[prost(bytes = "vec", tag = "2")] pub key: ::prost::alloc::vec::Vec, } + #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ContractCodeUpdate { - #[prost(string, tag="1")] + #[prost(string, tag = "1")] pub account_id: ::prost::alloc::string::String, - #[prost(bytes="vec", tag="2")] + #[prost(bytes = "vec", 
tag = "2")] pub code: ::prost::alloc::vec::Vec, } + #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ContractCodeDeletion { - #[prost(string, tag="1")] + #[prost(string, tag = "1")] pub account_id: ::prost::alloc::string::String, } + #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum Value { - #[prost(message, tag="1")] + #[prost(message, tag = "1")] AccountUpdate(AccountUpdate), - #[prost(message, tag="2")] + #[prost(message, tag = "2")] AccountDeletion(AccountDeletion), - #[prost(message, tag="3")] + #[prost(message, tag = "3")] AccessKeyUpdate(AccessKeyUpdate), - #[prost(message, tag="4")] + #[prost(message, tag = "4")] AccessKeyDeletion(AccessKeyDeletion), - #[prost(message, tag="5")] + #[prost(message, tag = "5")] DataUpdate(DataUpdate), - #[prost(message, tag="6")] + #[prost(message, tag = "6")] DataDeletion(DataDeletion), - #[prost(message, tag="7")] + #[prost(message, tag = "7")] ContractCodeUpdate(ContractCodeUpdate), - #[prost(message, tag="8")] + #[prost(message, tag = "8")] ContractDeletion(ContractCodeDeletion), } } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Account { - #[prost(message, optional, tag="1")] + #[prost(message, optional, tag = "1")] pub amount: ::core::option::Option, - #[prost(message, optional, tag="2")] + #[prost(message, optional, tag = "2")] pub locked: ::core::option::Option, - #[prost(message, optional, tag="3")] + #[prost(message, optional, tag = "3")] pub code_hash: ::core::option::Option, - #[prost(uint64, tag="4")] + #[prost(uint64, tag = "4")] pub storage_usage: u64, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct BlockHeader { - #[prost(uint64, tag="1")] + #[prost(uint64, tag = "1")] pub height: u64, - #[prost(uint64, tag="2")] + #[prost(uint64, tag = "2")] pub prev_height: u64, - #[prost(message, optional, tag="3")] + #[prost(message, optional, tag = "3")] pub epoch_id: ::core::option::Option, - #[prost(message, optional, tag="4")] + #[prost(message, optional, tag = "4")] pub next_epoch_id: ::core::option::Option, - #[prost(message, optional, tag="5")] + #[prost(message, optional, tag = "5")] pub hash: ::core::option::Option, - #[prost(message, optional, tag="6")] + #[prost(message, optional, tag = "6")] pub prev_hash: ::core::option::Option, - #[prost(message, optional, tag="7")] + #[prost(message, optional, tag = "7")] pub prev_state_root: ::core::option::Option, - #[prost(message, optional, tag="8")] + #[prost(message, optional, tag = "8")] pub chunk_receipts_root: ::core::option::Option, - #[prost(message, optional, tag="9")] + #[prost(message, optional, tag = "9")] pub chunk_headers_root: ::core::option::Option, - #[prost(message, optional, tag="10")] + #[prost(message, optional, tag = "10")] pub chunk_tx_root: ::core::option::Option, - #[prost(message, optional, tag="11")] + #[prost(message, optional, tag = "11")] pub outcome_root: ::core::option::Option, - #[prost(uint64, tag="12")] + #[prost(uint64, tag = "12")] pub chunks_included: u64, - #[prost(message, optional, tag="13")] + #[prost(message, optional, tag = "13")] pub challenges_root: ::core::option::Option, - #[prost(uint64, tag="14")] + #[prost(uint64, tag = "14")] pub timestamp: u64, - #[prost(uint64, tag="15")] + #[prost(uint64, tag = "15")] pub timestamp_nanosec: u64, - #[prost(message, optional, tag="16")] + #[prost(message, optional, tag = "16")] pub 
random_value: ::core::option::Option, - #[prost(message, repeated, tag="17")] + #[prost(message, repeated, tag = "17")] pub validator_proposals: ::prost::alloc::vec::Vec, - #[prost(bool, repeated, tag="18")] + #[prost(bool, repeated, tag = "18")] pub chunk_mask: ::prost::alloc::vec::Vec, - #[prost(message, optional, tag="19")] + #[prost(message, optional, tag = "19")] pub gas_price: ::core::option::Option, - #[prost(uint64, tag="20")] + #[prost(uint64, tag = "20")] pub block_ordinal: u64, - #[prost(message, optional, tag="21")] + #[prost(message, optional, tag = "21")] pub total_supply: ::core::option::Option, - #[prost(message, repeated, tag="22")] + #[prost(message, repeated, tag = "22")] pub challenges_result: ::prost::alloc::vec::Vec, - #[prost(uint64, tag="23")] + #[prost(uint64, tag = "23")] pub last_final_block_height: u64, - #[prost(message, optional, tag="24")] + #[prost(message, optional, tag = "24")] pub last_final_block: ::core::option::Option, - #[prost(uint64, tag="25")] + #[prost(uint64, tag = "25")] pub last_ds_final_block_height: u64, - #[prost(message, optional, tag="26")] + #[prost(message, optional, tag = "26")] pub last_ds_final_block: ::core::option::Option, - #[prost(message, optional, tag="27")] + #[prost(message, optional, tag = "27")] pub next_bp_hash: ::core::option::Option, - #[prost(message, optional, tag="28")] + #[prost(message, optional, tag = "28")] pub block_merkle_root: ::core::option::Option, - #[prost(bytes="vec", tag="29")] + #[prost(bytes = "vec", tag = "29")] pub epoch_sync_data_hash: ::prost::alloc::vec::Vec, - #[prost(message, repeated, tag="30")] + #[prost(message, repeated, tag = "30")] pub approvals: ::prost::alloc::vec::Vec, - #[prost(message, optional, tag="31")] + #[prost(message, optional, tag = "31")] pub signature: ::core::option::Option, - #[prost(uint32, tag="32")] + #[prost(uint32, tag = "32")] pub latest_protocol_version: u32, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct BigInt { - #[prost(bytes="vec", tag="1")] + #[prost(bytes = "vec", tag = "1")] pub bytes: ::prost::alloc::vec::Vec, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct CryptoHash { - #[prost(bytes="vec", tag="1")] + #[prost(bytes = "vec", tag = "1")] pub bytes: ::prost::alloc::vec::Vec, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Signature { - #[prost(enumeration="CurveKind", tag="1")] + #[prost(enumeration = "CurveKind", tag = "1")] pub r#type: i32, - #[prost(bytes="vec", tag="2")] + #[prost(bytes = "vec", tag = "2")] pub bytes: ::prost::alloc::vec::Vec, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct PublicKey { - #[prost(enumeration="CurveKind", tag="1")] + #[prost(enumeration = "CurveKind", tag = "1")] pub r#type: i32, - #[prost(bytes="vec", tag="2")] + #[prost(bytes = "vec", tag = "2")] pub bytes: ::prost::alloc::vec::Vec, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ValidatorStake { - #[prost(string, tag="1")] + #[prost(string, tag = "1")] pub account_id: ::prost::alloc::string::String, - #[prost(message, optional, tag="2")] + #[prost(message, optional, tag = "2")] pub public_key: ::core::option::Option, - #[prost(message, optional, tag="3")] + #[prost(message, optional, tag = "3")] pub stake: ::core::option::Option, } +#[allow(clippy::derive_partial_eq_without_eq)] 
#[derive(Clone, PartialEq, ::prost::Message)] pub struct SlashedValidator { - #[prost(string, tag="1")] + #[prost(string, tag = "1")] pub account_id: ::prost::alloc::string::String, - #[prost(bool, tag="2")] + #[prost(bool, tag = "2")] pub is_double_sign: bool, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ChunkHeader { - #[prost(bytes="vec", tag="1")] + #[prost(bytes = "vec", tag = "1")] pub chunk_hash: ::prost::alloc::vec::Vec, - #[prost(bytes="vec", tag="2")] + #[prost(bytes = "vec", tag = "2")] pub prev_block_hash: ::prost::alloc::vec::Vec, - #[prost(bytes="vec", tag="3")] + #[prost(bytes = "vec", tag = "3")] pub outcome_root: ::prost::alloc::vec::Vec, - #[prost(bytes="vec", tag="4")] + #[prost(bytes = "vec", tag = "4")] pub prev_state_root: ::prost::alloc::vec::Vec, - #[prost(bytes="vec", tag="5")] + #[prost(bytes = "vec", tag = "5")] pub encoded_merkle_root: ::prost::alloc::vec::Vec, - #[prost(uint64, tag="6")] + #[prost(uint64, tag = "6")] pub encoded_length: u64, - #[prost(uint64, tag="7")] + #[prost(uint64, tag = "7")] pub height_created: u64, - #[prost(uint64, tag="8")] + #[prost(uint64, tag = "8")] pub height_included: u64, - #[prost(uint64, tag="9")] + #[prost(uint64, tag = "9")] pub shard_id: u64, - #[prost(uint64, tag="10")] + #[prost(uint64, tag = "10")] pub gas_used: u64, - #[prost(uint64, tag="11")] + #[prost(uint64, tag = "11")] pub gas_limit: u64, - #[prost(message, optional, tag="12")] + #[prost(message, optional, tag = "12")] pub validator_reward: ::core::option::Option, - #[prost(message, optional, tag="13")] + #[prost(message, optional, tag = "13")] pub balance_burnt: ::core::option::Option, - #[prost(bytes="vec", tag="14")] + #[prost(bytes = "vec", tag = "14")] pub outgoing_receipts_root: ::prost::alloc::vec::Vec, - #[prost(bytes="vec", tag="15")] + #[prost(bytes = "vec", tag = "15")] pub tx_root: ::prost::alloc::vec::Vec, - #[prost(message, repeated, tag="16")] + #[prost(message, repeated, tag = "16")] pub validator_proposals: ::prost::alloc::vec::Vec, - #[prost(message, optional, tag="17")] + #[prost(message, optional, tag = "17")] pub signature: ::core::option::Option, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct IndexerShard { - #[prost(uint64, tag="1")] + #[prost(uint64, tag = "1")] pub shard_id: u64, - #[prost(message, optional, tag="2")] + #[prost(message, optional, tag = "2")] pub chunk: ::core::option::Option, - #[prost(message, repeated, tag="3")] - pub receipt_execution_outcomes: ::prost::alloc::vec::Vec, + #[prost(message, repeated, tag = "3")] + pub receipt_execution_outcomes: ::prost::alloc::vec::Vec< + IndexerExecutionOutcomeWithReceipt, + >, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct IndexerExecutionOutcomeWithReceipt { - #[prost(message, optional, tag="1")] + #[prost(message, optional, tag = "1")] pub execution_outcome: ::core::option::Option, - #[prost(message, optional, tag="2")] + #[prost(message, optional, tag = "2")] pub receipt: ::core::option::Option, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct IndexerChunk { - #[prost(string, tag="1")] + #[prost(string, tag = "1")] pub author: ::prost::alloc::string::String, - #[prost(message, optional, tag="2")] + #[prost(message, optional, tag = "2")] pub header: ::core::option::Option, - #[prost(message, repeated, tag="3")] + #[prost(message, repeated, tag = 
"3")] pub transactions: ::prost::alloc::vec::Vec, - #[prost(message, repeated, tag="4")] + #[prost(message, repeated, tag = "4")] pub receipts: ::prost::alloc::vec::Vec, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct IndexerTransactionWithOutcome { - #[prost(message, optional, tag="1")] + #[prost(message, optional, tag = "1")] pub transaction: ::core::option::Option, - #[prost(message, optional, tag="2")] + #[prost(message, optional, tag = "2")] pub outcome: ::core::option::Option, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct SignedTransaction { - #[prost(string, tag="1")] + #[prost(string, tag = "1")] pub signer_id: ::prost::alloc::string::String, - #[prost(message, optional, tag="2")] + #[prost(message, optional, tag = "2")] pub public_key: ::core::option::Option, - #[prost(uint64, tag="3")] + #[prost(uint64, tag = "3")] pub nonce: u64, - #[prost(string, tag="4")] + #[prost(string, tag = "4")] pub receiver_id: ::prost::alloc::string::String, - #[prost(message, repeated, tag="5")] + #[prost(message, repeated, tag = "5")] pub actions: ::prost::alloc::vec::Vec, - #[prost(message, optional, tag="6")] + #[prost(message, optional, tag = "6")] pub signature: ::core::option::Option, - #[prost(message, optional, tag="7")] + #[prost(message, optional, tag = "7")] pub hash: ::core::option::Option, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct IndexerExecutionOutcomeWithOptionalReceipt { - #[prost(message, optional, tag="1")] + #[prost(message, optional, tag = "1")] pub execution_outcome: ::core::option::Option, - #[prost(message, optional, tag="2")] + #[prost(message, optional, tag = "2")] pub receipt: ::core::option::Option, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Receipt { - #[prost(string, tag="1")] + #[prost(string, tag = "1")] pub predecessor_id: ::prost::alloc::string::String, - #[prost(string, tag="2")] + #[prost(string, tag = "2")] pub receiver_id: ::prost::alloc::string::String, - #[prost(message, optional, tag="3")] + #[prost(message, optional, tag = "3")] pub receipt_id: ::core::option::Option, - #[prost(oneof="receipt::Receipt", tags="10, 11")] + #[prost(oneof = "receipt::Receipt", tags = "10, 11")] pub receipt: ::core::option::Option, } /// Nested message and enum types in `Receipt`. 
pub mod receipt { + #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum Receipt { - #[prost(message, tag="10")] + #[prost(message, tag = "10")] Action(super::ReceiptAction), - #[prost(message, tag="11")] + #[prost(message, tag = "11")] Data(super::ReceiptData), } } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ReceiptData { - #[prost(message, optional, tag="1")] + #[prost(message, optional, tag = "1")] pub data_id: ::core::option::Option, - #[prost(bytes="vec", tag="2")] + #[prost(bytes = "vec", tag = "2")] pub data: ::prost::alloc::vec::Vec, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ReceiptAction { - #[prost(string, tag="1")] + #[prost(string, tag = "1")] pub signer_id: ::prost::alloc::string::String, - #[prost(message, optional, tag="2")] + #[prost(message, optional, tag = "2")] pub signer_public_key: ::core::option::Option, - #[prost(message, optional, tag="3")] + #[prost(message, optional, tag = "3")] pub gas_price: ::core::option::Option, - #[prost(message, repeated, tag="4")] + #[prost(message, repeated, tag = "4")] pub output_data_receivers: ::prost::alloc::vec::Vec, - #[prost(message, repeated, tag="5")] + #[prost(message, repeated, tag = "5")] pub input_data_ids: ::prost::alloc::vec::Vec, - #[prost(message, repeated, tag="6")] + #[prost(message, repeated, tag = "6")] pub actions: ::prost::alloc::vec::Vec, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct DataReceiver { - #[prost(message, optional, tag="1")] + #[prost(message, optional, tag = "1")] pub data_id: ::core::option::Option, - #[prost(string, tag="2")] + #[prost(string, tag = "2")] pub receiver_id: ::prost::alloc::string::String, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ExecutionOutcomeWithId { - #[prost(message, optional, tag="1")] + #[prost(message, optional, tag = "1")] pub proof: ::core::option::Option, - #[prost(message, optional, tag="2")] + #[prost(message, optional, tag = "2")] pub block_hash: ::core::option::Option, - #[prost(message, optional, tag="3")] + #[prost(message, optional, tag = "3")] pub id: ::core::option::Option, - #[prost(message, optional, tag="4")] + #[prost(message, optional, tag = "4")] pub outcome: ::core::option::Option, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ExecutionOutcome { - #[prost(string, repeated, tag="1")] + #[prost(string, repeated, tag = "1")] pub logs: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, - #[prost(message, repeated, tag="2")] + #[prost(message, repeated, tag = "2")] pub receipt_ids: ::prost::alloc::vec::Vec, - #[prost(uint64, tag="3")] + #[prost(uint64, tag = "3")] pub gas_burnt: u64, - #[prost(message, optional, tag="4")] + #[prost(message, optional, tag = "4")] pub tokens_burnt: ::core::option::Option, - #[prost(string, tag="5")] + #[prost(string, tag = "5")] pub executor_id: ::prost::alloc::string::String, - #[prost(enumeration="ExecutionMetadata", tag="6")] + #[prost(enumeration = "ExecutionMetadata", tag = "6")] pub metadata: i32, - #[prost(oneof="execution_outcome::Status", tags="20, 21, 22, 23")] + #[prost(oneof = "execution_outcome::Status", tags = "20, 21, 22, 23")] pub status: ::core::option::Option, } /// Nested message and enum types in `ExecutionOutcome`. 
pub mod execution_outcome { + #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum Status { - #[prost(message, tag="20")] + #[prost(message, tag = "20")] Unknown(super::UnknownExecutionStatus), - #[prost(message, tag="21")] + #[prost(message, tag = "21")] Failure(super::FailureExecutionStatus), - #[prost(message, tag="22")] + #[prost(message, tag = "22")] SuccessValue(super::SuccessValueExecutionStatus), - #[prost(message, tag="23")] + #[prost(message, tag = "23")] SuccessReceiptId(super::SuccessReceiptIdExecutionStatus), } } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct SuccessValueExecutionStatus { - #[prost(bytes="vec", tag="1")] + #[prost(bytes = "vec", tag = "1")] pub value: ::prost::alloc::vec::Vec<u8>, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct SuccessReceiptIdExecutionStatus { - #[prost(message, optional, tag="1")] + #[prost(message, optional, tag = "1")] pub id: ::core::option::Option<CryptoHash>, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] -pub struct UnknownExecutionStatus { -} +pub struct UnknownExecutionStatus {} +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct FailureExecutionStatus { - #[prost(oneof="failure_execution_status::Failure", tags="1, 2")] + #[prost(oneof = "failure_execution_status::Failure", tags = "1, 2")] pub failure: ::core::option::Option<failure_execution_status::Failure>, } /// Nested message and enum types in `FailureExecutionStatus`. pub mod failure_execution_status { + #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum Failure { - #[prost(message, tag="1")] + #[prost(message, tag = "1")] ActionError(super::ActionError), - #[prost(enumeration="super::InvalidTxError", tag="2")] + #[prost(enumeration = "super::InvalidTxError", tag = "2")] InvalidTxError(i32), } } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ActionError { - #[prost(uint64, tag="1")] + #[prost(uint64, tag = "1")] pub index: u64, - #[prost(oneof="action_error::Kind", tags="21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36")] + #[prost( + oneof = "action_error::Kind", + tags = "21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36" + )] pub kind: ::core::option::Option<action_error::Kind>, }
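Because `ExecutionOutcome.status` is itself optional and then a four-way oneof, reading a result takes two levels of matching. A hedged sketch of the access pattern (the helper name is invented for illustration; the types are the generated ones above):

```rust
use crate::codec::{self, execution_outcome::Status};

// Illustrative helper, not part of this change: extract the raw return
// value of an outcome, treating every non-`SuccessValue` status as absent.
fn success_value(outcome: &codec::ExecutionOutcome) -> Option<&[u8]> {
    match outcome.status.as_ref()? {
        Status::SuccessValue(s) => Some(s.value.as_slice()),
        Status::SuccessReceiptId(_) | Status::Failure(_) | Status::Unknown(_) => None,
    }
}
```

/// Nested message and enum types in `ActionError`. 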
pub mod action_error { + #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum Kind { - #[prost(message, tag="21")] + #[prost(message, tag = "21")] AccountAlreadyExist(super::AccountAlreadyExistsErrorKind), - #[prost(message, tag="22")] + #[prost(message, tag = "22")] AccountDoesNotExist(super::AccountDoesNotExistErrorKind), - #[prost(message, tag="23")] + #[prost(message, tag = "23")] CreateAccountOnlyByRegistrar(super::CreateAccountOnlyByRegistrarErrorKind), - #[prost(message, tag="24")] + #[prost(message, tag = "24")] CreateAccountNotAllowed(super::CreateAccountNotAllowedErrorKind), - #[prost(message, tag="25")] + #[prost(message, tag = "25")] ActorNoPermission(super::ActorNoPermissionErrorKind), - #[prost(message, tag="26")] + #[prost(message, tag = "26")] DeleteKeyDoesNotExist(super::DeleteKeyDoesNotExistErrorKind), - #[prost(message, tag="27")] + #[prost(message, tag = "27")] AddKeyAlreadyExists(super::AddKeyAlreadyExistsErrorKind), - #[prost(message, tag="28")] + #[prost(message, tag = "28")] DeleteAccountStaking(super::DeleteAccountStakingErrorKind), - #[prost(message, tag="29")] + #[prost(message, tag = "29")] LackBalanceForState(super::LackBalanceForStateErrorKind), - #[prost(message, tag="30")] + #[prost(message, tag = "30")] TriesToUnstake(super::TriesToUnstakeErrorKind), - #[prost(message, tag="31")] + #[prost(message, tag = "31")] TriesToStake(super::TriesToStakeErrorKind), - #[prost(message, tag="32")] + #[prost(message, tag = "32")] InsufficientStake(super::InsufficientStakeErrorKind), - #[prost(message, tag="33")] + #[prost(message, tag = "33")] FunctionCall(super::FunctionCallErrorKind), - #[prost(message, tag="34")] + #[prost(message, tag = "34")] NewReceiptValidation(super::NewReceiptValidationErrorKind), - #[prost(message, tag="35")] - OnlyImplicitAccountCreationAllowed(super::OnlyImplicitAccountCreationAllowedErrorKind), - #[prost(message, tag="36")] + #[prost(message, tag = "35")] + OnlyImplicitAccountCreationAllowed( + super::OnlyImplicitAccountCreationAllowedErrorKind, + ), + #[prost(message, tag = "36")] DeleteAccountWithLargeState(super::DeleteAccountWithLargeStateErrorKind), } } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct AccountAlreadyExistsErrorKind { - #[prost(string, tag="1")] + #[prost(string, tag = "1")] pub account_id: ::prost::alloc::string::String, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct AccountDoesNotExistErrorKind { - #[prost(string, tag="1")] + #[prost(string, tag = "1")] pub account_id: ::prost::alloc::string::String, } -//// A top-level account ID can only be created by registrar. +/// / A top-level account ID can only be created by registrar. 
+#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct CreateAccountOnlyByRegistrarErrorKind { - #[prost(string, tag="1")] + #[prost(string, tag = "1")] pub account_id: ::prost::alloc::string::String, - #[prost(string, tag="2")] + #[prost(string, tag = "2")] pub registrar_account_id: ::prost::alloc::string::String, - #[prost(string, tag="3")] + #[prost(string, tag = "3")] pub predecessor_id: ::prost::alloc::string::String, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct CreateAccountNotAllowedErrorKind { - #[prost(string, tag="1")] + #[prost(string, tag = "1")] pub account_id: ::prost::alloc::string::String, - #[prost(string, tag="2")] + #[prost(string, tag = "2")] pub predecessor_id: ::prost::alloc::string::String, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ActorNoPermissionErrorKind { - #[prost(string, tag="1")] + #[prost(string, tag = "1")] pub account_id: ::prost::alloc::string::String, - #[prost(string, tag="2")] + #[prost(string, tag = "2")] pub actor_id: ::prost::alloc::string::String, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct DeleteKeyDoesNotExistErrorKind { - #[prost(string, tag="1")] + #[prost(string, tag = "1")] pub account_id: ::prost::alloc::string::String, - #[prost(message, optional, tag="2")] + #[prost(message, optional, tag = "2")] pub public_key: ::core::option::Option, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct AddKeyAlreadyExistsErrorKind { - #[prost(string, tag="1")] + #[prost(string, tag = "1")] pub account_id: ::prost::alloc::string::String, - #[prost(message, optional, tag="2")] + #[prost(message, optional, tag = "2")] pub public_key: ::core::option::Option, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct DeleteAccountStakingErrorKind { - #[prost(string, tag="1")] + #[prost(string, tag = "1")] pub account_id: ::prost::alloc::string::String, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct LackBalanceForStateErrorKind { - #[prost(string, tag="1")] + #[prost(string, tag = "1")] pub account_id: ::prost::alloc::string::String, - #[prost(message, optional, tag="2")] + #[prost(message, optional, tag = "2")] pub balance: ::core::option::Option, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct TriesToUnstakeErrorKind { - #[prost(string, tag="1")] + #[prost(string, tag = "1")] pub account_id: ::prost::alloc::string::String, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct TriesToStakeErrorKind { - #[prost(string, tag="1")] + #[prost(string, tag = "1")] pub account_id: ::prost::alloc::string::String, - #[prost(message, optional, tag="2")] + #[prost(message, optional, tag = "2")] pub stake: ::core::option::Option, - #[prost(message, optional, tag="3")] + #[prost(message, optional, tag = "3")] pub locked: ::core::option::Option, - #[prost(message, optional, tag="4")] + #[prost(message, optional, tag = "4")] pub balance: ::core::option::Option, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct InsufficientStakeErrorKind { - #[prost(string, tag="1")] + #[prost(string, tag = "1")] pub account_id: 
::prost::alloc::string::String, - #[prost(message, optional, tag="2")] + #[prost(message, optional, tag = "2")] pub stake: ::core::option::Option<BigInt>, - #[prost(message, optional, tag="3")] + #[prost(message, optional, tag = "3")] pub minimum_stake: ::core::option::Option<BigInt>, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct FunctionCallErrorKind { - #[prost(enumeration="FunctionCallErrorSer", tag="1")] + #[prost(enumeration = "FunctionCallErrorSer", tag = "1")] pub error: i32, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct NewReceiptValidationErrorKind { - #[prost(enumeration="ReceiptValidationError", tag="1")] + #[prost(enumeration = "ReceiptValidationError", tag = "1")] pub error: i32, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct OnlyImplicitAccountCreationAllowedErrorKind { - #[prost(string, tag="1")] + #[prost(string, tag = "1")] pub account_id: ::prost::alloc::string::String, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct DeleteAccountWithLargeStateErrorKind { - #[prost(string, tag="1")] + #[prost(string, tag = "1")] pub account_id: ::prost::alloc::string::String, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct MerklePath { - #[prost(message, repeated, tag="1")] + #[prost(message, repeated, tag = "1")] pub path: ::prost::alloc::vec::Vec<MerklePathItem>, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct MerklePathItem { - #[prost(message, optional, tag="1")] + #[prost(message, optional, tag = "1")] pub hash: ::core::option::Option<CryptoHash>, - #[prost(enumeration="Direction", tag="2")] + #[prost(enumeration = "Direction", tag = "2")] pub direction: i32, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Action { - #[prost(oneof="action::Action", tags="1, 2, 3, 4, 5, 6, 7, 8")] + #[prost(oneof = "action::Action", tags = "1, 2, 3, 4, 5, 6, 7, 8")] pub action: ::core::option::Option<action::Action>, }
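`Action` uses the same oneof encoding, so filtering a receipt's action list down to one variant is a `filter_map` over the generated enum. A sketch under the same assumptions (`function_call_methods` is an illustrative name, not part of this change):

```rust
use crate::codec::{self, action::Action as ActionKind};

// Illustrative helper, not part of this change: collect the method names
// of all `FunctionCall` actions, ignoring the other seven variants.
fn function_call_methods(actions: &[codec::Action]) -> Vec<&str> {
    actions
        .iter()
        .filter_map(|a| match a.action.as_ref() {
            Some(ActionKind::FunctionCall(call)) => Some(call.method_name.as_str()),
            _ => None,
        })
        .collect()
}
```

/// Nested message and enum types in `Action`. 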
pub mod action { + #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum Action { - #[prost(message, tag="1")] + #[prost(message, tag = "1")] CreateAccount(super::CreateAccountAction), - #[prost(message, tag="2")] + #[prost(message, tag = "2")] DeployContract(super::DeployContractAction), - #[prost(message, tag="3")] + #[prost(message, tag = "3")] FunctionCall(super::FunctionCallAction), - #[prost(message, tag="4")] + #[prost(message, tag = "4")] Transfer(super::TransferAction), - #[prost(message, tag="5")] + #[prost(message, tag = "5")] Stake(super::StakeAction), - #[prost(message, tag="6")] + #[prost(message, tag = "6")] AddKey(super::AddKeyAction), - #[prost(message, tag="7")] + #[prost(message, tag = "7")] DeleteKey(super::DeleteKeyAction), - #[prost(message, tag="8")] + #[prost(message, tag = "8")] DeleteAccount(super::DeleteAccountAction), } } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] -pub struct CreateAccountAction { -} +pub struct CreateAccountAction {} +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct DeployContractAction { - #[prost(bytes="vec", tag="1")] + #[prost(bytes = "vec", tag = "1")] pub code: ::prost::alloc::vec::Vec, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct FunctionCallAction { - #[prost(string, tag="1")] + #[prost(string, tag = "1")] pub method_name: ::prost::alloc::string::String, - #[prost(bytes="vec", tag="2")] + #[prost(bytes = "vec", tag = "2")] pub args: ::prost::alloc::vec::Vec, - #[prost(uint64, tag="3")] + #[prost(uint64, tag = "3")] pub gas: u64, - #[prost(message, optional, tag="4")] + #[prost(message, optional, tag = "4")] pub deposit: ::core::option::Option, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct TransferAction { - #[prost(message, optional, tag="1")] + #[prost(message, optional, tag = "1")] pub deposit: ::core::option::Option, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct StakeAction { - #[prost(message, optional, tag="1")] + #[prost(message, optional, tag = "1")] pub stake: ::core::option::Option, - #[prost(message, optional, tag="2")] + #[prost(message, optional, tag = "2")] pub public_key: ::core::option::Option, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct AddKeyAction { - #[prost(message, optional, tag="1")] + #[prost(message, optional, tag = "1")] pub public_key: ::core::option::Option, - #[prost(message, optional, tag="2")] + #[prost(message, optional, tag = "2")] pub access_key: ::core::option::Option, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct DeleteKeyAction { - #[prost(message, optional, tag="1")] + #[prost(message, optional, tag = "1")] pub public_key: ::core::option::Option, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct DeleteAccountAction { - #[prost(string, tag="1")] + #[prost(string, tag = "1")] pub beneficiary_id: ::prost::alloc::string::String, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct AccessKey { - #[prost(uint64, tag="1")] + #[prost(uint64, tag = "1")] pub nonce: u64, - #[prost(message, optional, tag="2")] + #[prost(message, optional, tag = "2")] 
pub permission: ::core::option::Option<AccessKeyPermission>, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct AccessKeyPermission { - #[prost(oneof="access_key_permission::Permission", tags="1, 2")] + #[prost(oneof = "access_key_permission::Permission", tags = "1, 2")] pub permission: ::core::option::Option<access_key_permission::Permission>, } /// Nested message and enum types in `AccessKeyPermission`. pub mod access_key_permission { + #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum Permission { - #[prost(message, tag="1")] + #[prost(message, tag = "1")] FunctionCall(super::FunctionCallPermission), - #[prost(message, tag="2")] + #[prost(message, tag = "2")] FullAccess(super::FullAccessPermission), } } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct FunctionCallPermission { - #[prost(message, optional, tag="1")] + #[prost(message, optional, tag = "1")] pub allowance: ::core::option::Option<BigInt>, - #[prost(string, tag="2")] + #[prost(string, tag = "2")] pub receiver_id: ::prost::alloc::string::String, - #[prost(string, repeated, tag="3")] + #[prost(string, repeated, tag = "3")] pub method_names: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] -pub struct FullAccessPermission { -} +pub struct FullAccessPermission {} #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum CurveKind { Ed25519 = 0, Secp256k1 = 1, } +impl CurveKind { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + CurveKind::Ed25519 => "ED25519", + CurveKind::Secp256k1 => "SECP256K1", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option<Self> { + match value { + "ED25519" => Some(Self::Ed25519), + "SECP256K1" => Some(Self::Secp256k1), + _ => None, + } + } +} #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum ExecutionMetadata { V1 = 0, } -///todo: add more detail? +impl ExecutionMetadata { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + ExecutionMetadata::V1 => "ExecutionMetadataV1", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option<Self> { + match value { + "ExecutionMetadataV1" => Some(Self::V1), + _ => None, + } + } +} +/// todo: add more detail? #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum FunctionCallErrorSer { @@ -811,7 +936,39 @@ pub enum FunctionCallErrorSer { EvmError = 6, ExecutionError = 7, } -///todo: add more detail? +impl FunctionCallErrorSer { + /// String value of the enum field names used in the ProtoBuf definition. 
+ /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + FunctionCallErrorSer::CompilationError => "CompilationError", + FunctionCallErrorSer::LinkError => "LinkError", + FunctionCallErrorSer::MethodResolveError => "MethodResolveError", + FunctionCallErrorSer::WasmTrap => "WasmTrap", + FunctionCallErrorSer::WasmUnknownError => "WasmUnknownError", + FunctionCallErrorSer::HostError => "HostError", + FunctionCallErrorSer::EvmError => "_EVMError", + FunctionCallErrorSer::ExecutionError => "ExecutionError", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "CompilationError" => Some(Self::CompilationError), + "LinkError" => Some(Self::LinkError), + "MethodResolveError" => Some(Self::MethodResolveError), + "WasmTrap" => Some(Self::WasmTrap), + "WasmUnknownError" => Some(Self::WasmUnknownError), + "HostError" => Some(Self::HostError), + "_EVMError" => Some(Self::EvmError), + "ExecutionError" => Some(Self::ExecutionError), + _ => None, + } + } +} +/// todo: add more detail? #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum ReceiptValidationError { @@ -823,7 +980,45 @@ pub enum ReceiptValidationError { NumberInputDataDependenciesExceeded = 5, ActionsValidationError = 6, } -///todo: add more detail? +impl ReceiptValidationError { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + ReceiptValidationError::InvalidPredecessorId => "InvalidPredecessorId", + ReceiptValidationError::InvalidReceiverAccountId => { + "InvalidReceiverAccountId" + } + ReceiptValidationError::InvalidSignerAccountId => "InvalidSignerAccountId", + ReceiptValidationError::InvalidDataReceiverId => "InvalidDataReceiverId", + ReceiptValidationError::ReturnedValueLengthExceeded => { + "ReturnedValueLengthExceeded" + } + ReceiptValidationError::NumberInputDataDependenciesExceeded => { + "NumberInputDataDependenciesExceeded" + } + ReceiptValidationError::ActionsValidationError => "ActionsValidationError", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "InvalidPredecessorId" => Some(Self::InvalidPredecessorId), + "InvalidReceiverAccountId" => Some(Self::InvalidReceiverAccountId), + "InvalidSignerAccountId" => Some(Self::InvalidSignerAccountId), + "InvalidDataReceiverId" => Some(Self::InvalidDataReceiverId), + "ReturnedValueLengthExceeded" => Some(Self::ReturnedValueLengthExceeded), + "NumberInputDataDependenciesExceeded" => { + Some(Self::NumberInputDataDependenciesExceeded) + } + "ActionsValidationError" => Some(Self::ActionsValidationError), + _ => None, + } + } +} +/// todo: add more detail? #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum InvalidTxError { @@ -842,9 +1037,73 @@ pub enum InvalidTxError { ActionsValidation = 12, TransactionSizeExceeded = 13, } +impl InvalidTxError { + /// String value of the enum field names used in the ProtoBuf definition. 
+ /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + InvalidTxError::InvalidAccessKeyError => "InvalidAccessKeyError", + InvalidTxError::InvalidSignerId => "InvalidSignerId", + InvalidTxError::SignerDoesNotExist => "SignerDoesNotExist", + InvalidTxError::InvalidNonce => "InvalidNonce", + InvalidTxError::NonceTooLarge => "NonceTooLarge", + InvalidTxError::InvalidReceiverId => "InvalidReceiverId", + InvalidTxError::InvalidSignature => "InvalidSignature", + InvalidTxError::NotEnoughBalance => "NotEnoughBalance", + InvalidTxError::LackBalanceForState => "LackBalanceForState", + InvalidTxError::CostOverflow => "CostOverflow", + InvalidTxError::InvalidChain => "InvalidChain", + InvalidTxError::Expired => "Expired", + InvalidTxError::ActionsValidation => "ActionsValidation", + InvalidTxError::TransactionSizeExceeded => "TransactionSizeExceeded", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "InvalidAccessKeyError" => Some(Self::InvalidAccessKeyError), + "InvalidSignerId" => Some(Self::InvalidSignerId), + "SignerDoesNotExist" => Some(Self::SignerDoesNotExist), + "InvalidNonce" => Some(Self::InvalidNonce), + "NonceTooLarge" => Some(Self::NonceTooLarge), + "InvalidReceiverId" => Some(Self::InvalidReceiverId), + "InvalidSignature" => Some(Self::InvalidSignature), + "NotEnoughBalance" => Some(Self::NotEnoughBalance), + "LackBalanceForState" => Some(Self::LackBalanceForState), + "CostOverflow" => Some(Self::CostOverflow), + "InvalidChain" => Some(Self::InvalidChain), + "Expired" => Some(Self::Expired), + "ActionsValidation" => Some(Self::ActionsValidation), + "TransactionSizeExceeded" => Some(Self::TransactionSizeExceeded), + _ => None, + } + } +} #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum Direction { Left = 0, Right = 1, } +impl Direction { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Direction::Left => "left", + Direction::Right => "right", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. 
+ pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "left" => Some(Self::Left), + "right" => Some(Self::Right), + _ => None, + } + } +} diff --git a/chain/near/src/runtime/abi.rs b/chain/near/src/runtime/abi.rs index fdd3be12e6f..af3fad66d4d 100644 --- a/chain/near/src/runtime/abi.rs +++ b/chain/near/src/runtime/abi.rs @@ -630,7 +630,7 @@ impl ToAscObj for codec::BigInt { gas: &GasCounter, ) -> Result { // Bytes are reversed to align with BigInt bytes endianess - let reversed: Vec = self.bytes.iter().rev().map(|x| *x).collect(); + let reversed: Vec = self.bytes.iter().rev().copied().collect(); reversed.to_asc_obj(heap, gas) } diff --git a/chain/near/src/trigger.rs b/chain/near/src/trigger.rs index 049cce52bc8..6fc31e8aefe 100644 --- a/chain/near/src/trigger.rs +++ b/chain/near/src/trigger.rs @@ -393,7 +393,7 @@ mod tests { fn big_int(input: u64) -> Option { let value = - BigInt::try_from(input).expect(format!("Invalid BigInt value {}", input).as_ref()); + BigInt::try_from(input).unwrap_or_else(|_| panic!("Invalid BigInt value {}", input)); let bytes = value.to_signed_bytes_le(); Some(codec::BigInt { bytes }) @@ -401,21 +401,23 @@ mod tests { fn hash(input: &str) -> Option { Some(codec::CryptoHash { - bytes: hex::decode(input).expect(format!("Invalid hash value {}", input).as_ref()), + bytes: hex::decode(input).unwrap_or_else(|_| panic!("Invalid hash value {}", input)), }) } fn public_key(input: &str) -> Option { Some(codec::PublicKey { r#type: 0, - bytes: hex::decode(input).expect(format!("Invalid PublicKey value {}", input).as_ref()), + bytes: hex::decode(input) + .unwrap_or_else(|_| panic!("Invalid PublicKey value {}", input)), }) } fn signature(input: &str) -> Option { Some(codec::Signature { r#type: 0, - bytes: hex::decode(input).expect(format!("Invalid Signature value {}", input).as_ref()), + bytes: hex::decode(input) + .unwrap_or_else(|_| panic!("Invalid Signature value {}", input)), }) } diff --git a/chain/substreams/Cargo.toml b/chain/substreams/Cargo.toml index bc9bdbca51a..ad557e27c4f 100644 --- a/chain/substreams/Cargo.toml +++ b/chain/substreams/Cargo.toml @@ -1,10 +1,10 @@ [package] name = "graph-chain-substreams" -version = "0.27.0" -edition = "2021" +version.workspace = true +edition.workspace = true [build-dependencies] -tonic-build = { version = "0.7.2", features = ["prost"] } +tonic-build = { workspace = true } [dependencies] async-stream = "0.3" @@ -16,16 +16,17 @@ graph = { path = "../../graph" } graph-runtime-wasm = { path = "../../runtime/wasm" } lazy_static = "1.2.0" serde = "1.0" -prost = "0.10.4" -prost-types = "0.10.1" +prost = { workspace = true } +prost-types = { workspace = true } dirs-next = "2.0" anyhow = "1.0" tiny-keccak = "1.5.0" hex = "0.4.3" -semver = "1.0.12" +semver = "1.0.16" +base64 = "0.20.0" -itertools = "0.10.3" +itertools = "0.10.5" [dev-dependencies] graph-core = { path = "../../core" } -tokio = { version = "1", features = ["full"]} +tokio = { version = "1", features = ["full"] } diff --git a/chain/substreams/build.rs b/chain/substreams/build.rs index 8998174122b..8cccc11fe3a 100644 --- a/chain/substreams/build.rs +++ b/chain/substreams/build.rs @@ -1,7 +1,8 @@ fn main() { println!("cargo:rerun-if-changed=proto"); tonic_build::configure() + .protoc_arg("--experimental_allow_proto3_optional") .out_dir("src/protobuf") - .compile(&["codec.proto"], &["proto"]) + .compile(&["proto/codec.proto"], &["proto"]) .expect("Failed to compile Substreams entity proto(s)"); } diff --git a/chain/substreams/examples/substreams.rs 
b/chain/substreams/examples/substreams.rs index bc7af4acb5f..a6f74692f52 100644 --- a/chain/substreams/examples/substreams.rs +++ b/chain/substreams/examples/substreams.rs @@ -1,14 +1,10 @@ use anyhow::{format_err, Context, Error}; use graph::blockchain::block_stream::BlockStreamEvent; use graph::blockchain::substreams_block_stream::SubstreamsBlockStream; +use graph::firehose::SubgraphLimit; use graph::prelude::{info, tokio, DeploymentHash, Registry}; use graph::tokio_stream::StreamExt; -use graph::{ - env::env_var, - firehose::FirehoseEndpoint, - log::logger, - substreams::{self}, -}; +use graph::{env::env_var, firehose::FirehoseEndpoint, log::logger, substreams}; use graph_chain_substreams::mapper::Mapper; use graph_core::MetricsRegistry; use prost::Message; @@ -21,17 +17,17 @@ async fn main() -> Result<(), Error> { let token_env = env_var("SUBSTREAMS_API_TOKEN", "".to_string()); let mut token: Option = None; - if token_env.len() > 0 { + if !token_env.is_empty() { token = Some(token_env); } let endpoint = env_var( "SUBSTREAMS_ENDPOINT", - "https://api-dev.streamingfast.io".to_string(), + "https://api.streamingfast.io".to_string(), ); let package_file = env_var("SUBSTREAMS_PACKAGE", "".to_string()); - if package_file == "" { + if package_file.is_empty() { panic!("Environment variable SUBSTREAMS_PACKAGE must be set"); } @@ -51,7 +47,7 @@ async fn main() -> Result<(), Error> { token, false, false, - 1, + SubgraphLimit::Unlimited, )); let mut stream: SubstreamsBlockStream = @@ -79,17 +75,9 @@ async fn main() -> Result<(), Error> { Ok(block_stream_event) => match block_stream_event { BlockStreamEvent::Revert(_, _) => {} BlockStreamEvent::ProcessBlock(block_with_trigger, _) => { - let changes = block_with_trigger.block; - for change in changes.entity_changes { - info!(&logger, "----- Entity -----"); - info!( - &logger, - "name: {} operation: {}", change.entity, change.operation - ); + for change in block_with_trigger.block.changes.entity_changes { for field in change.fields { - info!(&logger, "field: {}, type: {}", field.name, field.value_type); - info!(&logger, "new value: {}", hex::encode(field.new_value)); - info!(&logger, "old value: {}", hex::encode(field.old_value)); + info!(&logger, "field: {:?}", field); } } } diff --git a/chain/substreams/proto/codec.proto b/chain/substreams/proto/codec.proto index 4320cb7c561..a24dcb97310 100644 --- a/chain/substreams/proto/codec.proto +++ b/chain/substreams/proto/codec.proto @@ -2,20 +2,16 @@ syntax = "proto3"; package substreams.entity.v1; -message EntitiesChanges { - bytes block_id = 1; - uint64 block_number = 2; - bytes prev_block_id = 3; - uint64 prev_block_number = 4; - repeated EntityChange entityChanges = 5; +message EntityChanges { + repeated EntityChange entity_changes = 5; } message EntityChange { string entity = 1; - bytes id = 2; + string id = 2; uint64 ordinal = 3; enum Operation { - UNSET = 0; // Protobuf default should not be used, this is used so that the consume can ensure that the value was actually specified + UNSET = 0; // Protobuf default should not be used, this is used so that the consume can ensure that the value was actually specified CREATE = 1; UPDATE = 2; DELETE = 3; @@ -24,19 +20,27 @@ message EntityChange { repeated Field fields = 5; } +message Value { + oneof typed { + int32 int32 = 1; + string bigdecimal = 2; + string bigint = 3; + string string = 4; + bytes bytes = 5; + bool bool = 6; + + //reserved 7 to 9; // For future types + + Array array = 10; + } +} + +message Array { + repeated Value value = 1; +} + message 
Field { string name = 1; - enum Type { - UNSET = 0; // Protobuf default should not be used, this is used so that the consume can ensure that the value was actually specified - BIGDECIMAL = 1; - BIGINT = 2; - INT = 3; // int32 - BYTES = 4; - STRING = 5; - } - Type value_type = 2; - bytes new_value = 3; - bool new_value_null = 4; - bytes old_value = 5; - bool old_value_null = 6; + optional Value new_value = 3; + optional Value old_value = 5; } diff --git a/chain/substreams/src/block_stream.rs b/chain/substreams/src/block_stream.rs index ef8409a6cac..80a402f7343 100644 --- a/chain/substreams/src/block_stream.rs +++ b/chain/substreams/src/block_stream.rs @@ -35,14 +35,11 @@ impl BlockStreamBuilderTrait for BlockStreamBuilder { deployment: DeploymentLocator, block_cursor: FirehoseCursor, _start_blocks: Vec, - _subgraph_current_block: Option, + subgraph_current_block: Option, filter: Arc, _unified_api_version: UnifiedMappingApiVersion, ) -> Result>> { - let firehose_endpoint = match chain.endpoints.random() { - Some(e) => e.clone(), - None => return Err(anyhow::format_err!("no firehose endpoint available")), - }; + let firehose_endpoint = chain.endpoints.random()?; let mapper = Arc::new(Mapper {}); @@ -54,7 +51,7 @@ impl BlockStreamBuilderTrait for BlockStreamBuilder { Ok(Box::new(SubstreamsBlockStream::new( deployment.hash, firehose_endpoint, - None, + subgraph_current_block, block_cursor.as_ref().clone(), mapper, filter.modules.clone(), diff --git a/chain/substreams/src/chain.rs b/chain/substreams/src/chain.rs index 68a73891b2c..7c1dbc0fc66 100644 --- a/chain/substreams/src/chain.rs +++ b/chain/substreams/src/chain.rs @@ -1,6 +1,6 @@ -use crate::{data_source::*, Block, TriggerData, TriggerFilter, TriggersAdapter}; +use crate::{data_source::*, EntityChanges, TriggerData, TriggerFilter, TriggersAdapter}; use anyhow::Error; -use core::fmt; +use graph::blockchain::EmptyNodeCapabilities; use graph::firehose::FirehoseEndpoints; use graph::prelude::{BlockHash, LoggerFactory, MetricsRegistry}; use graph::{ @@ -11,25 +11,28 @@ use graph::{ }, components::store::DeploymentLocator, data::subgraph::UnifiedMappingApiVersion, - impl_slog_value, prelude::{async_trait, BlockNumber, ChainStore}, slog::Logger, }; -use std::{str::FromStr, sync::Arc}; +use std::sync::Arc; + +#[derive(Default, Debug, Clone)] +pub struct Block { + pub hash: BlockHash, + pub number: BlockNumber, + pub changes: EntityChanges, +} impl blockchain::Block for Block { fn ptr(&self) -> BlockPtr { - return BlockPtr { - hash: BlockHash(Box::from(self.block_id.clone())), - number: self.block_number as i32, - }; + BlockPtr { + hash: self.hash.clone(), + number: self.number, + } } fn parent_ptr(&self) -> Option { - Some(BlockPtr { - hash: BlockHash(Box::from(self.prev_block_id.clone())), - number: self.prev_block_number as i32, - }) + None } } @@ -66,31 +69,6 @@ impl std::fmt::Debug for Chain { } } -#[derive(Clone, Copy, Debug, PartialEq, PartialOrd, Ord, Eq)] -pub struct NodeCapabilities {} - -impl FromStr for NodeCapabilities { - type Err = Error; - - fn from_str(_s: &str) -> Result { - Ok(NodeCapabilities {}) - } -} - -impl fmt::Display for NodeCapabilities { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.write_str("substream") - } -} - -impl_slog_value!(NodeCapabilities, "{}"); - -impl graph::blockchain::NodeCapabilities for NodeCapabilities { - fn from_data_sources(_data_sources: &[DataSource]) -> Self { - NodeCapabilities {} - } -} - #[async_trait] impl Blockchain for Chain { const KIND: BlockchainKind = 
BlockchainKind::Substreams; @@ -112,7 +90,7 @@ impl Blockchain for Chain { /// Trigger filter used as input to the triggers adapter. type TriggerFilter = TriggerFilter; - type NodeCapabilities = NodeCapabilities; + type NodeCapabilities = EmptyNodeCapabilities; fn triggers_adapter( &self, @@ -145,6 +123,17 @@ impl Blockchain for Chain { .await } + fn is_refetch_block_required(&self) -> bool { + false + } + async fn refetch_firehose_block( + &self, + _logger: &Logger, + _cursor: FirehoseCursor, + ) -> Result { + unimplemented!("This chain does not support Dynamic Data Sources. is_refetch_block_required always returns false, this shouldn't be called.") + } + async fn new_polling_block_stream( &self, _deployment: DeploymentLocator, diff --git a/chain/substreams/src/data_source.rs b/chain/substreams/src/data_source.rs index d4b73708f52..9e3389189ef 100644 --- a/chain/substreams/src/data_source.rs +++ b/chain/substreams/src/data_source.rs @@ -19,7 +19,7 @@ pub const SUBSTREAMS_KIND: &str = "substreams"; const DYNAMIC_DATA_SOURCE_ERROR: &str = "Substreams do not support dynamic data sources"; const TEMPLATE_ERROR: &str = "Substreams do not support templates"; -const ALLOWED_MAPPING_KIND: [&'static str; 1] = ["substreams/graph-entities"]; +const ALLOWED_MAPPING_KIND: [&str; 1] = ["substreams/graph-entities"]; #[derive(Clone, Debug, PartialEq)] /// Represents the DataSource portion of the manifest once it has been parsed @@ -34,15 +34,11 @@ pub struct DataSource { pub initial_block: Option, } -impl TryFrom> for DataSource { - type Error = anyhow::Error; - - fn try_from(_value: DataSourceTemplateInfo) -> Result { +impl blockchain::DataSource for DataSource { + fn from_template_info(_template_info: DataSourceTemplateInfo) -> Result { Err(anyhow!("Substreams does not support templates")) } -} -impl blockchain::DataSource for DataSource { fn address(&self) -> Option<&[u8]> { None } @@ -60,7 +56,7 @@ impl blockchain::DataSource for DataSource { } fn network(&self) -> Option<&str> { - self.network.as_ref().map(|s| s.as_str()) + self.network.as_deref() } fn context(&self) -> Arc> { diff --git a/chain/substreams/src/lib.rs b/chain/substreams/src/lib.rs index 68fa97ca189..60215c453cc 100644 --- a/chain/substreams/src/lib.rs +++ b/chain/substreams/src/lib.rs @@ -8,9 +8,8 @@ pub mod mapper; pub use block_stream::BlockStreamBuilder; pub use chain::*; -pub use codec::EntitiesChanges as Block; +pub use codec::EntityChanges; pub use data_source::*; pub use trigger::*; -pub use codec::field::Type as FieldType; pub use codec::Field; diff --git a/chain/substreams/src/mapper.rs b/chain/substreams/src/mapper.rs index 48c61a819d2..e9d5ba06862 100644 --- a/chain/substreams/src/mapper.rs +++ b/chain/substreams/src/mapper.rs @@ -1,13 +1,13 @@ -use crate::{Block, Chain, TriggerData}; +use crate::{Block, Chain, EntityChanges, TriggerData}; use graph::blockchain::block_stream::SubstreamsError::{ MultipleModuleOutputError, UnexpectedStoreDeltaOutput, }; use graph::blockchain::block_stream::{ BlockStreamEvent, BlockWithTriggers, FirehoseCursor, SubstreamsError, SubstreamsMapper, }; -use graph::prelude::{async_trait, BlockNumber, BlockPtr, Logger}; +use graph::prelude::{async_trait, BlockHash, BlockNumber, BlockPtr, Logger}; use graph::substreams::module_output::Data; -use graph::substreams::{BlockScopedData, ForkStep}; +use graph::substreams::{BlockScopedData, Clock, ForkStep}; use prost::Message; pub struct Mapper {} @@ -16,31 +16,53 @@ pub struct Mapper {} impl SubstreamsMapper for Mapper { async fn to_block_stream_event( 
&self, - _logger: &Logger, + logger: &Logger, block_scoped_data: &BlockScopedData, ) -> Result>, SubstreamsError> { - let step = ForkStep::from_i32(block_scoped_data.step).unwrap_or_else(|| { + let BlockScopedData { + outputs, + clock, + step, + cursor: _, + } = block_scoped_data; + + let step = ForkStep::from_i32(*step).unwrap_or_else(|| { panic!( "unknown step i32 value {}, maybe you forgot update & re-regenerate the protobuf definitions?", - block_scoped_data.step + step ) }); - if block_scoped_data.outputs.len() == 0 { + if outputs.is_empty() { return Ok(None); } - if block_scoped_data.outputs.len() > 1 { - return Err(MultipleModuleOutputError()); + if outputs.len() > 1 { + return Err(MultipleModuleOutputError); } //todo: handle step let module_output = &block_scoped_data.outputs[0]; let cursor = &block_scoped_data.cursor; - match module_output.data.as_ref().unwrap() { - Data::MapOutput(msg) => { - let changes: Block = Message::decode(msg.value.as_slice()).unwrap(); + let clock = match clock { + Some(clock) => clock, + None => return Err(SubstreamsError::MissingClockError), + }; + + let Clock { + id: hash, + number, + timestamp: _, + } = clock; + + let hash: BlockHash = hash.as_str().try_into()?; + let number: BlockNumber = *number as BlockNumber; + + match module_output.data.as_ref() { + Some(Data::MapOutput(msg)) => { + let changes: EntityChanges = Message::decode(msg.value.as_slice()) + .map_err(SubstreamsError::DecodingError)?; use ForkStep::*; match step { @@ -52,14 +74,19 @@ impl SubstreamsMapper for Mapper { // TODO(filipe): Fix once either trigger data can be empty // or we move the changes into trigger data. - BlockWithTriggers::new(changes, vec![TriggerData {}]), + BlockWithTriggers::new( + Block { + hash, + number, + changes, + }, + vec![TriggerData {}], + logger, + ), FirehoseCursor::from(cursor.clone()), ))), StepUndo => { - let parent_ptr = BlockPtr { - hash: changes.prev_block_id.clone().into(), - number: changes.prev_block_number as BlockNumber, - }; + let parent_ptr = BlockPtr { hash, number }; Ok(Some(BlockStreamEvent::Revert( parent_ptr, @@ -71,7 +98,8 @@ impl SubstreamsMapper for Mapper { } } } - Data::StoreDeltas(_) => Err(UnexpectedStoreDeltaOutput()), + Some(Data::DebugStoreDeltas(_)) => Err(UnexpectedStoreDeltaOutput), + _ => Err(SubstreamsError::ModuleOutputNotPresentOrUnexpected), } } } diff --git a/chain/substreams/src/protobuf/substreams.entity.v1.rs b/chain/substreams/src/protobuf/substreams.entity.v1.rs index 94354c7af95..47368e25fba 100644 --- a/chain/substreams/src/protobuf/substreams.entity.v1.rs +++ b/chain/substreams/src/protobuf/substreams.entity.v1.rs @@ -1,32 +1,36 @@ +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] -pub struct EntitiesChanges { - #[prost(bytes="vec", tag="1")] - pub block_id: ::prost::alloc::vec::Vec, - #[prost(uint64, tag="2")] - pub block_number: u64, - #[prost(bytes="vec", tag="3")] - pub prev_block_id: ::prost::alloc::vec::Vec, - #[prost(uint64, tag="4")] - pub prev_block_number: u64, - #[prost(message, repeated, tag="5")] +pub struct EntityChanges { + #[prost(message, repeated, tag = "5")] pub entity_changes: ::prost::alloc::vec::Vec, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct EntityChange { - #[prost(string, tag="1")] + #[prost(string, tag = "1")] pub entity: ::prost::alloc::string::String, - #[prost(bytes="vec", tag="2")] - pub id: ::prost::alloc::vec::Vec, - #[prost(uint64, tag="3")] + #[prost(string, tag = "2")] + 
pub id: ::prost::alloc::string::String, + #[prost(uint64, tag = "3")] pub ordinal: u64, - #[prost(enumeration="entity_change::Operation", tag="4")] + #[prost(enumeration = "entity_change::Operation", tag = "4")] pub operation: i32, - #[prost(message, repeated, tag="5")] + #[prost(message, repeated, tag = "5")] pub fields: ::prost::alloc::vec::Vec<Field>, } /// Nested message and enum types in `EntityChange`. pub mod entity_change { - #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] + #[derive( + Clone, + Copy, + Debug, + PartialEq, + Eq, + Hash, + PartialOrd, + Ord, + ::prost::Enumeration + )] #[repr(i32)] pub enum Operation { /// Protobuf default should not be used, this is used so that the consumer can ensure that the value was actually specified @@ -35,34 +39,71 @@ pub mod entity_change { Update = 2, Delete = 3, } + impl Operation { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Operation::Unset => "UNSET", + Operation::Create => "CREATE", + Operation::Update => "UPDATE", + Operation::Delete => "DELETE", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option<Self> { + match value { + "UNSET" => Some(Self::Unset), + "CREATE" => Some(Self::Create), + "UPDATE" => Some(Self::Update), + "DELETE" => Some(Self::Delete), + _ => None, + } + } + } } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] -pub struct Field { - #[prost(string, tag="1")] - pub name: ::prost::alloc::string::String, - #[prost(enumeration="field::Type", tag="2")] - pub value_type: i32, - #[prost(bytes="vec", tag="3")] - pub new_value: ::prost::alloc::vec::Vec<u8>, - #[prost(bool, tag="4")] - pub new_value_null: bool, - #[prost(bytes="vec", tag="5")] - pub old_value: ::prost::alloc::vec::Vec<u8>, - #[prost(bool, tag="6")] - pub old_value_null: bool, +pub struct Value { + #[prost(oneof = "value::Typed", tags = "1, 2, 3, 4, 5, 6, 10")] + pub typed: ::core::option::Option<value::Typed>, } -/// Nested message and enum types in `Field`. -pub mod field { - #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] - #[repr(i32)] - pub enum Type { - /// Protobuf default should not be used, this is used so that the consume can ensure that the value was actually specified - Unset = 0, - Bigdecimal = 1, - Bigint = 2, - /// int32 - Int = 3, - Bytes = 4, - String = 5, +/// Nested message and enum types in `Value`. 
+pub mod value { + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Typed { + #[prost(int32, tag = "1")] + Int32(i32), + #[prost(string, tag = "2")] + Bigdecimal(::prost::alloc::string::String), + #[prost(string, tag = "3")] + Bigint(::prost::alloc::string::String), + #[prost(string, tag = "4")] + String(::prost::alloc::string::String), + #[prost(bytes, tag = "5")] + Bytes(::prost::alloc::vec::Vec), + #[prost(bool, tag = "6")] + Bool(bool), + #[prost(message, tag = "10")] + Array(super::Array), } } +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Array { + #[prost(message, repeated, tag = "1")] + pub value: ::prost::alloc::vec::Vec, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Field { + #[prost(string, tag = "1")] + pub name: ::prost::alloc::string::String, + #[prost(message, optional, tag = "3")] + pub new_value: ::core::option::Option, + #[prost(message, optional, tag = "5")] + pub old_value: ::core::option::Option, +} diff --git a/chain/substreams/src/trigger.rs b/chain/substreams/src/trigger.rs index 22e0ace272d..67dc4dc6bd4 100644 --- a/chain/substreams/src/trigger.rs +++ b/chain/substreams/src/trigger.rs @@ -1,14 +1,14 @@ -use std::{collections::HashMap, sync::Arc}; +use std::{collections::HashMap, str::FromStr, sync::Arc}; use anyhow::Error; use graph::{ - blockchain::{self, block_stream::BlockWithTriggers, BlockPtr}, + blockchain::{self, block_stream::BlockWithTriggers, BlockPtr, EmptyNodeCapabilities}, components::{ - store::{DeploymentLocator, EntityKey, SubgraphFork}, + store::{DeploymentLocator, EntityKey, EntityType, SubgraphFork}, subgraph::{MappingError, ProofOfIndexingEvent, SharedProofOfIndexing}, }, data::store::scalar::Bytes, - data_source, + data_source::{self, CausalityRegion}, prelude::{ anyhow, async_trait, BigDecimal, BigInt, BlockHash, BlockNumber, BlockState, Entity, RuntimeHostBuilder, Value, @@ -19,11 +19,8 @@ use graph::{ use graph_runtime_wasm::module::ToAscPtr; use lazy_static::__Deref; -use crate::codec::Field; -use crate::{ - codec::{entity_change::Operation, field::Type}, - Block, Chain, NodeCapabilities, NoopDataSourceTemplate, -}; +use crate::codec; +use crate::{codec::entity_change::Operation, Block, Chain, NoopDataSourceTemplate}; #[derive(Eq, PartialEq, PartialOrd, Ord, Debug)] pub struct TriggerData {} @@ -77,7 +74,7 @@ impl blockchain::TriggerFilter for TriggerFilter { return; } - if let Some(ref ds) = data_sources.next() { + if let Some(ds) = data_sources.next() { *data_sources_len = 1; *modules = ds.source.package.modules.clone(); *module_name = ds.source.module_name.clone(); @@ -85,8 +82,8 @@ impl blockchain::TriggerFilter for TriggerFilter { } } - fn node_capabilities(&self) -> NodeCapabilities { - NodeCapabilities {} + fn node_capabilities(&self) -> EmptyNodeCapabilities { + EmptyNodeCapabilities::default() } fn to_firehose_filter(self) -> Vec { @@ -176,7 +173,7 @@ where _debug_fork: &Option>, _subgraph_metrics: &Arc, ) -> Result, MappingError> { - for entity_change in block.entity_changes.iter() { + for entity_change in block.changes.entity_changes.iter() { match entity_change.operation() { Operation::Unset => { // Potentially an issue with the server side or @@ -184,26 +181,31 @@ where return Err(MappingError::Unknown(anyhow!("Detected UNSET entity operation, either a server error or there's a new type of operation and we're running an outdated protobuf"))); } 
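+ // Create and update are handled identically: each field carried by the change is decoded below and upserted into the entity's data map.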
Operation::Create | Operation::Update => { - // TODO(filipe): Remove this once the substreams GRPC has been fixed. - let entity_type: &str = { - let letter: String = entity_change.entity[0..1].to_uppercase(); - &(letter + &entity_change.entity[1..]) + let entity_type: &str = &entity_change.entity; + let entity_id: String = entity_change.id.clone(); + let key = EntityKey { + entity_type: EntityType::new(entity_type.to_string()), + entity_id: entity_id.clone().into(), + causality_region: CausalityRegion::ONCHAIN, // Substreams don't currently support offchain data }; - let entity_id: String = String::from_utf8(entity_change.id.clone()) - .map_err(|e| MappingError::Unknown(anyhow::Error::from(e)))?; - let key = EntityKey::data(entity_type.to_string(), entity_id.clone()); let mut data: HashMap = HashMap::from_iter(vec![]); + for field in entity_change.fields.iter() { - let value: Value = decode_entity_change(&field, &entity_change.entity)?; - *data - .entry(field.name.as_str().to_owned()) - .or_insert(Value::Null) = value; + let new_value: &codec::value::Typed = match &field.new_value { + Some(codec::Value { + typed: Some(new_value), + }) => new_value, + _ => continue, + }; + + let value: Value = decode_value(new_value)?; + *data.entry(field.name.clone()).or_insert(Value::Null) = value; } write_poi_event( proof_of_indexing, &ProofOfIndexingEvent::SetEntity { - entity_type: &entity_type, + entity_type, id: &entity_id, data: &data, }, @@ -215,9 +217,12 @@ where } Operation::Delete => { let entity_type: &str = &entity_change.entity; - let entity_id: String = String::from_utf8(entity_change.id.clone()) - .map_err(|e| MappingError::Unknown(anyhow::Error::from(e)))?; - let key = EntityKey::data(entity_type.to_string(), entity_id.clone()); + let entity_id: String = entity_change.id.clone(); + let key = EntityKey { + entity_type: EntityType::new(entity_type.to_string()), + entity_id: entity_id.clone().into(), + causality_region: CausalityRegion::ONCHAIN, // Substreams don't currently support offchain data + }; state.entity_cache.remove(key); @@ -238,89 +243,92 @@ where } } -fn decode_entity_change(field: &Field, entity: &String) -> Result { - match field.value_type() { - Type::Unset => { - return Err(MappingError::Unknown(anyhow!( - "Invalid field type, the protobuf probably needs updating" - ))) - } - Type::Bigdecimal => match BigDecimal::parse_bytes(field.new_value.as_ref()) { - Some(bd) => Ok(Value::BigDecimal(bd)), - None => { - return Err(MappingError::Unknown(anyhow!( - "Unable to parse BigDecimal for entity {}", - entity - ))) +fn decode_value(value: &crate::codec::value::Typed) -> Result { + use codec::value::Typed; + + match value { + Typed::Int32(new_value) => Ok(Value::Int(*new_value)), + + Typed::Bigdecimal(new_value) => BigDecimal::from_str(new_value) + .map(Value::BigDecimal) + .map_err(|err| MappingError::Unknown(anyhow::Error::from(err))), + + Typed::Bigint(new_value) => BigInt::from_str(new_value) + .map(Value::BigInt) + .map_err(|err| MappingError::Unknown(anyhow::Error::from(err))), + + Typed::String(new_value) => { + let mut string = new_value.clone(); + + // Strip null characters since they are not accepted by Postgres. 
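+ // (Postgres TEXT values cannot contain the NUL byte \u{0000}; inserting one would fail, so it is dropped here.)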
+ if string.contains('\u{0000}') { + string = string.replace('\u{0000}', ""); } - }, - Type::Bigint => Ok(Value::BigInt(BigInt::from_signed_bytes_be( - field.new_value.as_ref(), - ))), - Type::Int => { - let mut bytes: [u8; 8] = [0; 8]; - bytes.copy_from_slice(field.new_value.as_ref()); - Ok(Value::Int(i64::from_be_bytes(bytes) as i32)) + Ok(Value::String(string)) } - Type::Bytes => Ok(Value::Bytes(Bytes::from(field.new_value.as_ref()))), - Type::String => Ok(Value::String( - String::from_utf8(field.new_value.clone()) - .map_err(|e| MappingError::Unknown(anyhow::Error::from(e)))?, - )), + + Typed::Bytes(new_value) => base64::decode(&new_value) + .map(|bs| Value::Bytes(Bytes::from(bs.as_ref()))) + .map_err(|err| MappingError::Unknown(anyhow::Error::from(err))), + + Typed::Bool(new_value) => Ok(Value::Bool(*new_value)), + + Typed::Array(arr) => arr + .value + .iter() + .filter_map(|item| item.typed.as_ref().map(decode_value)) + .collect::, MappingError>>() + .map(Value::List), } } #[cfg(test)] mod test { - use std::str::FromStr; + use std::{ops::Add, str::FromStr}; - use crate::codec::field::Type as FieldType; - use crate::codec::Field; - use crate::trigger::decode_entity_change; + use crate::codec::value::Typed; + use crate::codec::{Array, Value}; + use crate::trigger::decode_value; use graph::{ data::store::scalar::Bytes, - prelude::{BigDecimal, BigInt, Value}, + prelude::{BigDecimal, BigInt, Value as GraphValue}, }; #[test] fn validate_substreams_field_types() { struct Case { - field: Field, - entity: String, - expected_new_value: Value, + name: String, + value: Value, + expected_value: GraphValue, } let cases = vec![ Case { - field: Field { - name: "setting string value".to_string(), - value_type: FieldType::String as i32, - new_value: Vec::from( - "d4325ee72c39999e778a9908f5fb0803f78e30c441a5f2ce5c65eee0e0eba59d", - ), - new_value_null: false, - old_value: Vec::from("".to_string()), - old_value_null: true, + name: "string value".to_string(), + value: Value { + typed: Some(Typed::String( + "d4325ee72c39999e778a9908f5fb0803f78e30c441a5f2ce5c65eee0e0eba59d" + .to_string(), + )), }, - entity: "Block".to_string(), - expected_new_value: Value::String( + expected_value: GraphValue::String( "d4325ee72c39999e778a9908f5fb0803f78e30c441a5f2ce5c65eee0e0eba59d".to_string(), ), }, Case { - field: Field { - name: "settings bytes value".to_string(), - value_type: FieldType::Bytes as i32, - new_value: hex::decode( - "445247fe150195bd866516594e087e1728294aa831613f4d48b8ec618908519f", - ) - .unwrap(), - new_value_null: false, - old_value: Vec::from("".to_string()), - old_value_null: true, + name: "bytes value".to_string(), + value: Value { + typed: Some(Typed::Bytes( + base64::encode( + hex::decode( + "445247fe150195bd866516594e087e1728294aa831613f4d48b8ec618908519f", + ) + .unwrap(), + ) + .into_bytes(), + )), }, - entity: "Block".to_string(), - expected_new_value: Value::Bytes( + expected_value: GraphValue::Bytes( Bytes::from_str( "0x445247fe150195bd866516594e087e1728294aa831613f4d48b8ec618908519f", ) @@ -328,122 +336,76 @@ mod test { ), }, Case { - field: Field { - name: "setting int value for block 12369760".to_string(), - value_type: FieldType::Int as i32, - new_value: hex::decode("0000000000bcbf60").unwrap(), - new_value_null: false, - old_value: vec![], - old_value_null: true, - }, - entity: "Block".to_string(), - expected_new_value: Value::Int(12369760), - }, - Case { - field: Field { - name: "setting int value for block 12369622".to_string(), - value_type: FieldType::Int as i32, - new_value: 
hex::decode("0000000000bcbed6").unwrap(), - new_value_null: false, - old_value: vec![], - old_value_null: true, - }, - entity: "Block".to_string(), - expected_new_value: Value::Int(12369622), - }, - Case { - field: Field { - name: "setting int value for block 12369623".to_string(), - value_type: FieldType::Int as i32, - new_value: hex::decode("0000000000bcbed7").unwrap(), - new_value_null: false, - old_value: vec![], - old_value_null: true, + name: "int value for block".to_string(), + value: Value { + typed: Some(Typed::Int32(12369760)), }, - entity: "Block".to_string(), - expected_new_value: Value::Int(12369623), + expected_value: GraphValue::Int(12369760), }, Case { - field: Field { - name: "setting big int transactions count of 123".to_string(), - value_type: FieldType::Bigint as i32, - new_value: hex::decode("7b").unwrap(), - new_value_null: false, - old_value: vec![], - old_value_null: true, + name: "negative int value".to_string(), + value: Value { + typed: Some(Typed::Int32(-12369760)), }, - entity: "Block".to_string(), - expected_new_value: Value::BigInt(BigInt::from(123u64)), + expected_value: GraphValue::Int(-12369760), }, Case { - field: Field { - name: "setting big int transactions count of 302".to_string(), - value_type: FieldType::Bigint as i32, - new_value: hex::decode("012e").unwrap(), - new_value_null: false, - old_value: vec![], - old_value_null: true, + name: "big int".to_string(), + value: Value { + typed: Some(Typed::Bigint("123".to_string())), }, - entity: "Block".to_string(), - expected_new_value: Value::BigInt(BigInt::from(302u64)), + expected_value: GraphValue::BigInt(BigInt::from(123u64)), }, Case { - field: Field { - name: "setting big int transactions count of 209".to_string(), - value_type: FieldType::Bigint as i32, - new_value: hex::decode("00d1").unwrap(), - new_value_null: false, - old_value: vec![], - old_value_null: true, + name: "big int > u64".to_string(), + value: Value { + typed: Some(Typed::Bigint( + BigInt::from(u64::MAX).add(BigInt::from(1)).to_string(), + )), }, - entity: "Block".to_string(), - expected_new_value: Value::BigInt(BigInt::from(209u64)), + expected_value: GraphValue::BigInt(BigInt::from(u64::MAX).add(BigInt::from(1))), }, Case { - field: Field { - name: "setting big decimal value".to_string(), - value_type: FieldType::Bigdecimal as i32, - new_value: hex::decode("3133363633312e35").unwrap(), - new_value_null: false, - old_value: vec![], - old_value_null: true, + name: "big decimal value".to_string(), + value: Value { + typed: Some(Typed::Bigdecimal("3133363633312e35".to_string())), }, - entity: "Block".to_string(), - expected_new_value: Value::BigDecimal(BigDecimal::from(136631.5)), + expected_value: GraphValue::BigDecimal(BigDecimal::new( + BigInt::from(3133363633312u64), + 35, + )), }, Case { - field: Field { - name: "setting big decimal value 2".to_string(), - value_type: FieldType::Bigdecimal as i32, - new_value: hex::decode("3133303730392e30").unwrap(), - new_value_null: false, - old_value: vec![], - old_value_null: true, + name: "bool value".to_string(), + value: Value { + typed: Some(Typed::Bool(true)), }, - entity: "Block".to_string(), - expected_new_value: Value::BigDecimal(BigDecimal::from(130709.0)), + expected_value: GraphValue::Bool(true), }, Case { - field: Field { - name: "setting big decimal value 3".to_string(), - value_type: FieldType::Bigdecimal as i32, - new_value: hex::decode("39373839322e36").unwrap(), - new_value_null: false, - old_value: vec![], - old_value_null: true, + name: "string array".to_string(), + value: 
Value { + typed: Some(Typed::Array(Array { + value: vec![ + Value { + typed: Some(Typed::String("1".to_string())), + }, + Value { + typed: Some(Typed::String("2".to_string())), + }, + Value { + typed: Some(Typed::String("3".to_string())), + }, + ], + })), }, - entity: "Block".to_string(), - expected_new_value: Value::BigDecimal(BigDecimal::new(BigInt::from(978926u64), -1)), + expected_value: GraphValue::List(vec!["1".into(), "2".into(), "3".into()]), }, ]; for case in cases.into_iter() { - let value: Value = decode_entity_change(&case.field, &case.entity).unwrap(); - assert_eq!( - case.expected_new_value, value, - "failed case: {}", - case.field.name - ) + let value: GraphValue = decode_value(&case.value.typed.unwrap()).unwrap(); + assert_eq!(case.expected_value, value, "failed case: {}", case.name) } } } diff --git a/core/Cargo.toml b/core/Cargo.toml index 557d791d4be..21a71303542 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -1,15 +1,15 @@ [package] name = "graph-core" -version = "0.27.0" -edition = "2021" +version.workspace = true +edition.workspace = true [dependencies] async-trait = "0.1.50" -atomic_refcell = "0.1.8" +atomic_refcell = "0.1.9" async-stream = "0.3" bytes = "1.0" -futures01 = { package="futures", version="0.1.31" } -futures = { version="0.3.4", features=["compat"] } +futures01 = { package = "futures", version = "0.1.31" } +futures = { version = "0.3.4", features = ["compat"] } graph = { path = "../graph" } # This dependency is temporary. The multiblockchain refactoring is not # finished as long as this dependency exists @@ -20,14 +20,14 @@ graph-chain-cosmos = { path = "../chain/cosmos" } graph-chain-substreams = { path = "../chain/substreams" } lazy_static = "1.2.0" lru_time_cache = "0.11" -semver = "1.0.12" +semver = "1.0.16" serde = "1.0" serde_json = "1.0" serde_yaml = "0.8" # Switch to crates.io once tower 0.5 is released -tower = { git = "https://github.com/tower-rs/tower.git", features = ["util", "limit"] } +tower = { git = "https://github.com/tower-rs/tower.git", features = ["full"] } graph-runtime-wasm = { path = "../runtime/wasm" } -cid = "0.8.6" +cid = "0.10.1" anyhow = "1.0" [dev-dependencies] @@ -36,5 +36,8 @@ graph-mock = { path = "../mock" } test-store = { path = "../store/test-store" } hex = "0.4.3" graphql-parser = "0.4.0" -pretty_assertions = "1.2.1" +pretty_assertions = "1.3.0" anyhow = "1.0" +ipfs-api-backend-hyper = "0.6" +ipfs-api = { version = "0.17.0", features = ["with-hyper-rustls"], default-features = false } +uuid = { version = "1.2.2", features = ["v4"] } diff --git a/core/src/lib.rs b/core/src/lib.rs index 08ac3fc3345..972a45e508f 100644 --- a/core/src/lib.rs +++ b/core/src/lib.rs @@ -6,4 +6,7 @@ mod subgraph; pub use crate::link_resolver::LinkResolver; pub use crate::metrics::MetricsRegistry; -pub use crate::subgraph::{SubgraphAssignmentProvider, SubgraphInstanceManager, SubgraphRegistrar}; +pub use crate::subgraph::{ + SubgraphAssignmentProvider, SubgraphInstanceManager, SubgraphRegistrar, SubgraphRunner, + SubgraphTriggerProcessor, +}; diff --git a/core/src/link_resolver.rs b/core/src/link_resolver.rs index 9b2be6f107c..10a065ef011 100644 --- a/core/src/link_resolver.rs +++ b/core/src/link_resolver.rs @@ -186,7 +186,7 @@ impl LinkResolverTrait for LinkResolver { let req_path = path.clone(); let timeout = self.timeout; - let data = retry_policy(self.retry, "ipfs.cat", &logger) + let data = retry_policy(self.retry, "ipfs.cat", logger) .run(move || { let path = req_path.clone(); let client = client.clone(); @@ -229,7 +229,7 @@ impl 
LinkResolverTrait for LinkResolver { restrict_file_size(&link.link, size, max_file_size)?; let link = link.link.clone(); - let data = retry_policy(self.retry, "ipfs.getBlock", &logger) + let data = retry_policy(self.retry, "ipfs.getBlock", logger) .run(move || { let link = link.clone(); let client = client.clone(); @@ -310,7 +310,7 @@ impl LinkResolverTrait for LinkResolver { // run through the loop. match try_ready!(stream.poll().map_err(|e| anyhow::anyhow!("{}", e))) { Some(b) => buf.extend_from_slice(&b), - None if buf.len() > 0 => buf.extend_from_slice(&[b'\n']), + None if !buf.is_empty() => buf.extend_from_slice(&[b'\n']), None => return Ok(Async::Ready(None)), } } diff --git a/core/src/metrics/registry.rs b/core/src/metrics/registry.rs index 12f77cea4fc..067cf4d9faf 100644 --- a/core/src/metrics/registry.rs +++ b/core/src/metrics/registry.rs @@ -210,7 +210,7 @@ impl MetricsRegistryTrait for MetricsRegistry { let id = gauge.desc().first().unwrap().id; let maybe_gauge = self.global_gauges.read().unwrap().get(&id).cloned(); if let Some(gauge) = maybe_gauge { - Ok(gauge.clone()) + Ok(gauge) } else { self.register(name, Box::new(gauge.clone())); self.global_gauges @@ -284,7 +284,7 @@ fn global_counters_are_shared() { let logger = log::logger(false); let prom_reg = Arc::new(Registry::new()); - let registry = MetricsRegistry::new(logger, prom_reg.clone()); + let registry = MetricsRegistry::new(logger, prom_reg); fn check_counters( registry: &MetricsRegistry, diff --git a/core/src/polling_monitor/ipfs_service.rs b/core/src/polling_monitor/ipfs_service.rs index 05d904f17fc..127bf13b073 100644 --- a/core/src/polling_monitor/ipfs_service.rs +++ b/core/src/polling_monitor/ipfs_service.rs @@ -1,63 +1,73 @@ use anyhow::{anyhow, Error}; use bytes::Bytes; -use cid::Cid; -use futures::{Future, FutureExt}; +use futures::future::BoxFuture; use graph::{ - cheap_clone::CheapClone, - ipfs_client::{IpfsClient, StatApi}, - tokio::sync::Semaphore, + ipfs_client::{CidFile, IpfsClient, StatApi}, + prelude::CheapClone, }; -use std::{pin::Pin, sync::Arc, task::Poll, time::Duration}; -use tower::Service; +use std::time::Duration; +use tower::{buffer::Buffer, ServiceBuilder, ServiceExt}; const CLOUDFLARE_TIMEOUT: u16 = 524; const GATEWAY_TIMEOUT: u16 = 504; -/// Reference type, clones will refer to the same service. +pub type IpfsService = Buffer, Error>>>; + +pub fn ipfs_service( + client: IpfsClient, + max_file_size: u64, + timeout: Duration, + concurrency_and_rate_limit: u16, +) -> IpfsService { + let ipfs = IpfsServiceInner { + client, + max_file_size, + timeout, + }; + + let svc = ServiceBuilder::new() + .rate_limit(concurrency_and_rate_limit.into(), Duration::from_secs(1)) + .concurrency_limit(concurrency_and_rate_limit as usize) + .service_fn(move |req| ipfs.cheap_clone().call_inner(req)) + .boxed(); + + // The `Buffer` makes it so the rate and concurrency limit are shared among clones. 
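+ // A small bound is enough: the buffer exists to share the service among clones, not to queue extra work; callers wait for capacity via `poll_ready`.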
+ Buffer::new(svc, 1) +} + #[derive(Clone)] -pub struct IpfsService { +struct IpfsServiceInner { client: IpfsClient, max_file_size: u64, timeout: Duration, - concurrency_limiter: Arc, } -impl CheapClone for IpfsService { +impl CheapClone for IpfsServiceInner { fn cheap_clone(&self) -> Self { Self { client: self.client.cheap_clone(), max_file_size: self.max_file_size, timeout: self.timeout, - concurrency_limiter: self.concurrency_limiter.cheap_clone(), } } } -impl IpfsService { - pub fn new( - client: IpfsClient, - max_file_size: u64, - timeout: Duration, - concurrency_limit: u16, - ) -> Self { - Self { - client, - max_file_size, - timeout, - concurrency_limiter: Arc::new(Semaphore::new(concurrency_limit as usize)), - } - } - - async fn call(&self, cid: Cid) -> Result, Error> { +impl IpfsServiceInner { + async fn call_inner(self, req: CidFile) -> Result, Error> { + let CidFile { cid, path } = req; let multihash = cid.hash().code(); if !SAFE_MULTIHASHES.contains(&multihash) { return Err(anyhow!("CID multihash {} is not allowed", multihash)); } - let cid_str = cid.to_string(); + let cid_str = match path { + Some(path) => format!("{}/{}", cid, path), + None => cid.to_string(), + }; + let size = match self .client - .stat_size(StatApi::Files, cid_str, self.timeout) + .stat_size(StatApi::Files, cid_str.clone(), self.timeout) .await { Ok(size) => size, @@ -71,7 +81,7 @@ impl IpfsService { if size > self.max_file_size { return Err(anyhow!( "IPFS file {} is too large. It can be at most {} bytes but is {} bytes", - cid.to_string(), + cid_str, self.max_file_size, size )); @@ -79,36 +89,12 @@ impl IpfsService { Ok(self .client - .cat_all(&cid.to_string(), self.timeout) + .cat_all(&cid_str, self.timeout) .await .map(Some)?) } } -impl Service for IpfsService { - type Response = (Cid, Option); - type Error = (Cid, Error); - type Future = Pin> + Send>>; - - fn poll_ready(&mut self, cx: &mut std::task::Context<'_>) -> Poll> { - // The permit is acquired and immediately dropped, as tower does not yet allow returning it. - // So this is only indicative of capacity being available. - Pin::new(&mut self.concurrency_limiter.acquire().boxed()) - .poll(cx) - .map_ok(|_| ()) - .map_err(|_| unreachable!("semaphore is never closed")) - } - - fn call(&mut self, cid: Cid) -> Self::Future { - let this = self.cheap_clone(); - async move { - let _permit = this.concurrency_limiter.acquire().await; - this.call(cid).await.map(|x| (cid, x)).map_err(|e| (cid, e)) - } - .boxed() - } -} - // Multihashes that are collision resistant. This is not complete but covers the commonly used ones. 
// Code table: https://github.com/multiformats/multicodec/blob/master/table.csv // rust-multihash code enum: https://github.com/multiformats/rust-multihash/blob/master/src/multihash_impl.rs @@ -129,3 +115,45 @@ const SAFE_MULTIHASHES: [u64; 15] = [ 0xb260, // BLAKE2s-256 (32-byte hash size) 0x1e, // BLAKE3-256 (32-byte hash size) ]; + +#[cfg(test)] +mod test { + use ipfs::IpfsApi; + use ipfs_api as ipfs; + use std::{fs, str::FromStr, time::Duration}; + use tower::ServiceExt; + + use cid::Cid; + use graph::{ipfs_client::IpfsClient, tokio}; + + use uuid::Uuid; + + #[tokio::test] + async fn cat_file_in_folder() { + let path = "./tests/fixtures/ipfs_folder"; + let uid = Uuid::new_v4().to_string(); + fs::write(format!("{}/random.txt", path), &uid).unwrap(); + + let cl: ipfs::IpfsClient = ipfs::IpfsClient::default(); + + let rsp = cl.add_path(&path).await.unwrap(); + + let ipfs_folder = rsp.iter().find(|rsp| rsp.name == "ipfs_folder").unwrap(); + + let local = IpfsClient::localhost(); + let cid = Cid::from_str(&ipfs_folder.hash).unwrap(); + let file = "random.txt".to_string(); + + let svc = super::ipfs_service(local, 100000, Duration::from_secs(5), 10); + + let content = svc + .oneshot(super::CidFile { + cid, + path: Some(file), + }) + .await + .unwrap() + .unwrap(); + assert_eq!(content.to_vec(), uid.as_bytes().to_vec()); + } +} diff --git a/core/src/polling_monitor/metrics.rs b/core/src/polling_monitor/metrics.rs index c7cfa89bc90..86d65790a7b 100644 --- a/core/src/polling_monitor/metrics.rs +++ b/core/src/polling_monitor/metrics.rs @@ -46,7 +46,7 @@ impl PollingMonitorMetrics { requests, errors, not_found, - queue_depth: queue_depth.into(), + queue_depth, } } diff --git a/core/src/polling_monitor/mod.rs b/core/src/polling_monitor/mod.rs index cb8dbf4c340..e50979d39f2 100644 --- a/core/src/polling_monitor/mod.rs +++ b/core/src/polling_monitor/mod.rs @@ -1,20 +1,94 @@ -pub mod ipfs_service; +mod ipfs_service; mod metrics; +use std::collections::HashMap; use std::fmt::Display; +use std::hash::Hash; use std::sync::Arc; +use std::task::Poll; +use std::time::Duration; -use futures::stream; +use futures::future::BoxFuture; use futures::stream::StreamExt; +use futures::{stream, Future, FutureExt, TryFutureExt}; use graph::cheap_clone::CheapClone; use graph::parking_lot::Mutex; use graph::prelude::tokio; +use graph::prometheus::{Counter, Gauge}; use graph::slog::{debug, Logger}; use graph::util::monitored::MonitoredVecDeque as VecDeque; use tokio::sync::{mpsc, watch}; +use tower::retry::backoff::{Backoff, ExponentialBackoff, ExponentialBackoffMaker, MakeBackoff}; +use tower::util::rng::HasherRng; use tower::{Service, ServiceExt}; pub use self::metrics::PollingMonitorMetrics; +pub use ipfs_service::{ipfs_service, IpfsService}; + +const MIN_BACKOFF: Duration = Duration::from_secs(5); + +const MAX_BACKOFF: Duration = Duration::from_secs(600); + +struct Backoffs { + backoff_maker: ExponentialBackoffMaker, + backoffs: HashMap, +} + +impl Backoffs { + fn new() -> Self { + // Unwrap: Config is constant and valid. + Self { + backoff_maker: ExponentialBackoffMaker::new( + MIN_BACKOFF, + MAX_BACKOFF, + 1.0, + HasherRng::new(), + ) + .unwrap(), + backoffs: HashMap::new(), + } + } + + fn next_backoff(&mut self, id: ID) -> impl Future { + self.backoffs + .entry(id) + .or_insert_with(|| self.backoff_maker.make_backoff()) + .next_backoff() + } + + fn remove(&mut self, id: &ID) { + self.backoffs.remove(id); + } +} + +// A queue that notifies `waker` whenever an element is pushed. 
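+// A `watch` channel keeps only the most recent notification, which is all the consumer needs: it simply re-checks the queue whenever the value changes.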
+struct Queue<T> { + queue: Mutex<VecDeque<T>>, + waker: watch::Sender<()>, +} + +impl<T> Queue<T> { + fn new(depth: Gauge, popped: Counter) -> (Arc<Self>, watch::Receiver<()>) { + let queue = Mutex::new(VecDeque::new(depth, popped)); + let (waker, woken) = watch::channel(()); + let this = Queue { queue, waker }; + (Arc::new(this), woken) + } + + fn push_back(&self, e: T) { + self.queue.lock().push_back(e); + let _ = self.waker.send(()); + } + + fn push_front(&self, e: T) { + self.queue.lock().push_front(e); + let _ = self.waker.send(()); + } + + fn pop_front(&self) -> Option<T> { + self.queue.lock().pop_front() + } +} /// Spawn a monitor that actively polls a service. Whenever the service has capacity, the monitor /// pulls object ids from the queue and polls the service. If the object is not present or in case @@ -22,42 +96,48 @@ pub use self::metrics::PollingMonitorMetrics; /// /// The service returns the request ID along with errors or responses. The response is an /// `Option`, to represent the object not being found. -pub fn spawn_monitor<ID, S, E, Response>( +pub fn spawn_monitor<ID, S, E, Res>( service: S, - response_sender: mpsc::Sender<(ID, Response)>, + response_sender: mpsc::Sender<(ID, Res)>, logger: Logger, metrics: PollingMonitorMetrics, ) -> PollingMonitor<ID> where - ID: Display + Send + 'static, - S: Service<ID, Response = (ID, Option<Response>), Error = (ID, E)> + Send + 'static, + S: Service<ID, Response = Option<Res>, Error = E> + Send + 'static, + ID: Display + Clone + Default + Eq + Send + Sync + Hash + 'static, E: Display + Send + 'static, S::Future: Send, { - let queue = Arc::new(Mutex::new(VecDeque::new( - metrics.queue_depth.clone(), - metrics.requests.clone(), - ))); - let (wake_up_queue, queue_woken) = watch::channel(()); + let service = ReturnRequest { service }; + let (queue, queue_woken) = Queue::new(metrics.queue_depth.clone(), metrics.requests.clone()); + let cancel_check = response_sender.clone(); let queue_to_stream = { let queue = queue.cheap_clone(); stream::unfold((), move |()| { let queue = queue.cheap_clone(); let mut queue_woken = queue_woken.clone(); + let cancel_check = cancel_check.clone(); async move { loop { - let id = queue.lock().pop_front(); + if cancel_check.is_closed() { + break None; + } + + let id = queue.pop_front(); match id { Some(id) => break Some((id, ())), - None => match queue_woken.changed().await { - // Queue woken, check it. - Ok(()) => {} - - // The `PollingMonitor` has been dropped, cancel this task. - Err(_) => break None, - }, - }; + + // Nothing on the queue, wait for a queue wake up or cancellation. + None => { + futures::future::select( + // Unwrap: `queue` holds a sender. + queue_woken.changed().map(|r| r.unwrap()).boxed(), + cancel_check.closed().boxed(), + ) + .await; + } + } } } }) @@ -66,10 +146,12 @@ where { let queue = queue.cheap_clone(); graph::spawn(async move { + let mut backoffs = Backoffs::new(); let mut responses = service.call_all(queue_to_stream).unordered().boxed(); while let Some(response) = responses.next().await { match response { Ok((id, Some(response))) => { + backoffs.remove(&id); let send_result = response_sender.send((id, response)).await; if send_result.is_err() { // The receiver has been dropped, cancel this task. @@ -80,7 +162,7 @@ where // Object not found, push the id to the back of the queue. Ok((id, None)) => { metrics.not_found.inc(); - queue.lock().push_back(id); + queue.push_back(id); } // Error polling, log it and push the id to the back of the queue. 
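The effect of the per-id backoff added in this file is easiest to see in isolation. A minimal sketch, assuming tower's unstable `retry::backoff` module is enabled (the same API this diff imports) and a tokio runtime with macros; the printed attempts are purely illustrative:

use std::time::Duration;
use tower::retry::backoff::{Backoff, ExponentialBackoffMaker, MakeBackoff};
use tower::util::rng::HasherRng;

#[tokio::main]
async fn main() {
    // Same parameters as the monitor above: 5s floor, 600s cap, full jitter.
    let mut maker = ExponentialBackoffMaker::new(
        Duration::from_secs(5),
        Duration::from_secs(600),
        1.0,
        HasherRng::new(),
    )
    .unwrap();

    // One `Backoff` per failing object id; `Backoffs::remove` drops it on
    // success, which resets the schedule back to the floor.
    let mut backoff = maker.make_backoff();
    for attempt in 1..=3 {
        // Each consecutive failure waits roughly twice as long as the previous one.
        backoff.next_backoff().await;
        println!("re-queueing failed object, attempt {attempt}");
    }
}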
@@ -89,87 +89,116 @@ where "error" => format!("{:#}", e), "object_id" => id.to_string()); metrics.errors.inc(); - queue.lock().push_back(id); + + // Requests that return errors could mean there is a permanent issue with + // fetching the given item, or could signal the endpoint is overloaded. + // Either way a backoff makes sense. + let queue = queue.cheap_clone(); + let backoff = backoffs.next_backoff(id.clone()); + graph::spawn(async move { + backoff.await; + queue.push_back(id); + }); } } } }); } - PollingMonitor { - queue, - wake_up_queue, - } + PollingMonitor { queue } } /// Handle for adding objects to be monitored. pub struct PollingMonitor<ID> { - queue: Arc<Mutex<VecDeque<ID>>>, - - // This serves two purposes, to wake up the monitor when an item arrives on an empty queue, and - // to stop the montior task when this handle is dropped. - wake_up_queue: watch::Sender<()>, + queue: Arc<Queue<ID>>, } impl<ID> PollingMonitor<ID> { /// Add an object id to the polling queue. New requests have priority and are pushed to the /// front of the queue. pub fn monitor(&self, id: ID) { - let mut queue = self.queue.lock(); - if queue.is_empty() { - // If the send fails, the response receiver has been dropped, so this handle is useless. - let _ = self.wake_up_queue.send(()); - } - queue.push_front(id); + self.queue.push_front(id); + } +} + +struct ReturnRequest<S> { + service: S, +} + +impl<S, Req> Service<Req> for ReturnRequest<S> +where + S: Service<Req>, + Req: Clone + Default + Send + Sync + 'static, + S::Error: Send, + S::Future: Send + 'static, +{ + type Response = (Req, S::Response); + type Error = (Req, S::Error); + type Future = BoxFuture<'static, Result<Self::Response, Self::Error>>; + + fn poll_ready(&mut self, cx: &mut std::task::Context<'_>) -> Poll<Result<(), Self::Error>> { + // `Req::default` is a value that won't be used, since if `poll_ready` errors the service is shot anyway. 
+ self.service.poll_ready(cx).map_err(|e| (Req::default(), e)) + } + + fn call(&mut self, req: Req) -> Self::Future { + let req1 = req.clone(); + self.service + .call(req.clone()) + .map_ok(move |x| (req, x)) + .map_err(move |e| (req1, e)) + .boxed() } } #[cfg(test)] mod tests { use anyhow::anyhow; - use futures::{Future, FutureExt, TryFutureExt}; use graph::log; - use std::{pin::Pin, task::Poll}; use tower_test::mock; use super::*; - struct MockService(mock::Mock<&'static str, Option<&'static str>>); - - impl Service<&'static str> for MockService { - type Response = (&'static str, Option<&'static str>); - - type Error = (&'static str, anyhow::Error); - - type Future = Pin> + Send>>; - - fn poll_ready(&mut self, cx: &mut std::task::Context<'_>) -> Poll> { - self.0.poll_ready(cx).map_err(|_| unreachable!()) - } + async fn send_response(handle: &mut mock::Handle, res: U) { + handle.next_request().await.unwrap().1.send_response(res) + } - fn call(&mut self, req: &'static str) -> Self::Future { - self.0 - .call(req) - .map_ok(move |x| (req, x)) - .map_err(move |e| (req, anyhow!(e.to_string()))) - .boxed() - } + fn setup() -> ( + mock::Handle<&'static str, Option<&'static str>>, + PollingMonitor<&'static str>, + mpsc::Receiver<(&'static str, &'static str)>, + ) { + let (svc, handle) = mock::pair(); + let (tx, rx) = mpsc::channel(10); + let monitor = spawn_monitor(svc, tx, log::discard(), PollingMonitorMetrics::mock()); + (handle, monitor, rx) } - async fn send_response(handle: &mut mock::Handle, res: U) { - handle.next_request().await.unwrap().1.send_response(res) + #[tokio::test] + async fn polling_monitor_shared_svc() { + let (svc, mut handle) = mock::pair(); + let shared_svc = tower::buffer::Buffer::new(tower::limit::ConcurrencyLimit::new(svc, 1), 1); + let make_monitor = |svc| { + let (tx, rx) = mpsc::channel(10); + let metrics = PollingMonitorMetrics::mock(); + let monitor = spawn_monitor(svc, tx, log::discard(), metrics); + (monitor, rx) + }; + + // Spawn a monitor and yield to ensure it is polled and waiting on the tx. + let (_monitor0, mut _rx0) = make_monitor(shared_svc.clone()); + tokio::task::yield_now().await; + + // Test that the waiting monitor above is not occupying a concurrency slot on the service. + let (monitor1, mut rx1) = make_monitor(shared_svc); + monitor1.monitor("req-0"); + send_response(&mut handle, Some("res-0")).await; + assert_eq!(rx1.recv().await, Some(("req-0", "res-0"))); } #[tokio::test] async fn polling_monitor_simple() { - let (svc, mut handle) = mock::pair(); - let (tx, mut rx) = mpsc::channel(10); - let monitor = spawn_monitor( - MockService(svc), - tx, - log::discard(), - PollingMonitorMetrics::mock(), - ); + let (mut handle, monitor, mut rx) = setup(); // Basic test, single file is immediately available. monitor.monitor("req-0"); @@ -179,14 +290,7 @@ mod tests { #[tokio::test] async fn polling_monitor_unordered() { - let (svc, mut handle) = mock::pair(); - let (tx, mut rx) = mpsc::channel(10); - let monitor = spawn_monitor( - MockService(svc), - tx, - log::discard(), - PollingMonitorMetrics::mock(), - ); + let (mut handle, monitor, mut rx) = setup(); // Test unorderedness of the response stream, and the LIFO semantics of `monitor`. // @@ -203,12 +307,7 @@ mod tests { #[tokio::test] async fn polling_monitor_failed_push_to_back() { - let (svc, mut handle) = mock::pair(); - let (tx, mut rx) = mpsc::channel(10); - - // Limit service to one request at a time. 
- let svc = tower::limit::ConcurrencyLimit::new(MockService(svc), 1); - let monitor = spawn_monitor(svc, tx, log::discard(), PollingMonitorMetrics::mock()); + let (mut handle, monitor, mut rx) = setup(); // Test that objects not found go on the back of the queue. monitor.monitor("req-0"); @@ -232,32 +331,22 @@ mod tests { #[tokio::test] async fn polling_monitor_cancelation() { - let (svc, _handle) = mock::pair(); - let (tx, mut rx) = mpsc::channel(10); - let monitor = spawn_monitor( - MockService(svc), - tx, - log::discard(), - PollingMonitorMetrics::mock(), - ); - - // Cancelation on monitor drop. - drop(monitor); - assert_eq!(rx.recv().await, None); + // Cancelation on receiver drop, no pending request. + let (mut handle, _monitor, rx) = setup(); + drop(rx); + assert!(handle.next_request().await.is_none()); - let (svc, mut handle) = mock::pair(); - let (tx, rx) = mpsc::channel(10); - let monitor = spawn_monitor( - MockService(svc), - tx, - log::discard(), - PollingMonitorMetrics::mock(), - ); - - // Cancelation on receiver drop. + // Cancelation on receiver drop, with pending request. + let (mut handle, monitor, rx) = setup(); monitor.monitor("req-0"); drop(rx); - send_response(&mut handle, Some("res-0")).await; assert!(handle.next_request().await.is_none()); + + // Cancelation on receiver drop, while queue is waiting. + let (mut handle, _monitor, rx) = setup(); + let handle = tokio::spawn(async move { handle.next_request().await }); + tokio::task::yield_now().await; + drop(rx); + assert!(handle.await.unwrap().is_none()); } } diff --git a/core/src/subgraph/context.rs b/core/src/subgraph/context.rs index 653d6c8875f..64195128f49 100644 --- a/core/src/subgraph/context.rs +++ b/core/src/subgraph/context.rs @@ -1,18 +1,16 @@ pub mod instance; -use crate::polling_monitor::{ - ipfs_service::IpfsService, spawn_monitor, PollingMonitor, PollingMonitorMetrics, -}; +use crate::polling_monitor::{spawn_monitor, IpfsService, PollingMonitor, PollingMonitorMetrics}; use anyhow::{self, Error}; use bytes::Bytes; -use cid::Cid; use graph::{ blockchain::Blockchain, components::{ store::{DeploymentId, SubgraphFork}, subgraph::{MappingError, SharedProofOfIndexing}, }, - data_source::{offchain, DataSource, TriggerData}, + data_source::{offchain, CausalityRegion, DataSource, TriggerData}, + ipfs_client::CidFile, prelude::{ BlockNumber, BlockState, CancelGuard, DeploymentHash, MetricsRegistry, RuntimeHostBuilder, SubgraphInstanceMetrics, TriggerProcessor, @@ -30,9 +28,9 @@ pub type SharedInstanceKeepAliveMap = Arc +pub struct IndexingContext where T: RuntimeHostBuilder, C: Blockchain, @@ -74,7 +72,7 @@ impl> IndexingContext { ) -> Result, MappingError> { self.process_trigger_in_hosts( logger, - &self.instance.hosts(), + self.instance.hosts(), block, trigger, state, @@ -113,15 +111,21 @@ impl> IndexingContext { .await } - // Removes data sources hosts with a creation block greater or equal to `reverted_block`, so - // that they are no longer candidates for `process_trigger`. - // - // This does not currently affect the `offchain_monitor` or the `filter`, so they will continue - // to include data sources that have been reverted. This is not ideal for performance, but it - // does not affect correctness since triggers that have no matching host will be ignored by - // `process_trigger`. 
- pub fn revert_data_sources(&mut self, reverted_block: BlockNumber) { - self.instance.revert_data_sources(reverted_block) + /// Removes data sources hosts with a creation block greater or equal to `reverted_block`, so + /// that they are no longer candidates for `process_trigger`. + /// + /// This does not currently affect the `offchain_monitor` or the `filter`, so they will continue + /// to include data sources that have been reverted. This is not ideal for performance, but it + /// does not affect correctness since triggers that have no matching host will be ignored by + /// `process_trigger`. + /// + /// File data sources that have been marked not done during this process will get re-queued + pub fn revert_data_sources(&mut self, reverted_block: BlockNumber) -> Result<(), Error> { + let removed = self.instance.revert_data_sources(reverted_block); + + removed + .into_iter() + .try_for_each(|source| self.offchain_monitor.add_source(source)) } pub fn add_dynamic_data_source( @@ -134,17 +138,26 @@ impl> IndexingContext { if host.is_some() { if let Some(source) = source { - self.offchain_monitor.add_source(&source)?; + self.offchain_monitor.add_source(source)?; } } Ok(host) } + + pub fn causality_region_next_value(&mut self) -> CausalityRegion { + self.instance.causality_region_next_value() + } + + #[cfg(debug_assertions)] + pub fn instance(&self) -> &SubgraphInstance { + &self.instance + } } -pub(crate) struct OffchainMonitor { - ipfs_monitor: PollingMonitor, - ipfs_monitor_rx: mpsc::Receiver<(Cid, Bytes)>, +pub struct OffchainMonitor { + ipfs_monitor: PollingMonitor, + ipfs_monitor_rx: mpsc::Receiver<(CidFile, Bytes)>, } impl OffchainMonitor { @@ -167,9 +180,9 @@ impl OffchainMonitor { } } - fn add_source(&mut self, source: &offchain::Source) -> Result<(), Error> { + fn add_source(&mut self, source: offchain::Source) -> Result<(), Error> { match source { - offchain::Source::Ipfs(cid) => self.ipfs_monitor.monitor(cid.clone()), + offchain::Source::Ipfs(cid_file) => self.ipfs_monitor.monitor(cid_file), }; Ok(()) } @@ -180,8 +193,8 @@ impl OffchainMonitor { let mut triggers = vec![]; loop { match self.ipfs_monitor_rx.try_recv() { - Ok((cid, data)) => triggers.push(offchain::TriggerData { - source: offchain::Source::Ipfs(cid), + Ok((cid_file, data)) => triggers.push(offchain::TriggerData { + source: offchain::Source::Ipfs(cid_file), data: Arc::new(data), }), Err(TryRecvError::Disconnected) => { diff --git a/core/src/subgraph/context/instance.rs b/core/src/subgraph/context/instance.rs index a9e3ade7778..d760dad1386 100644 --- a/core/src/subgraph/context/instance.rs +++ b/core/src/subgraph/context/instance.rs @@ -1,14 +1,17 @@ use futures01::sync::mpsc::Sender; use graph::{ blockchain::Blockchain, - data_source::{DataSource, DataSourceTemplate}, + data_source::{ + causality_region::CausalityRegionSeq, offchain, CausalityRegion, DataSource, + DataSourceTemplate, + }, prelude::*, }; use std::collections::HashMap; use super::OffchainMonitor; -pub(crate) struct SubgraphInstance> { +pub struct SubgraphInstance> { subgraph_id: DeploymentHash, network: String, host_builder: T, @@ -24,6 +27,9 @@ pub(crate) struct SubgraphInstance> { /// Maps the hash of a module to a channel to the thread in which the module is instantiated. module_cache: HashMap<[u8; 32], Sender>, + + /// This manages the sequence of causality regions for the subgraph. 
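+ /// Each file data source is assigned the next value in this sequence, keeping its entities in a causality region separate from onchain data.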
+ causality_region_seq: CausalityRegionSeq, } impl<C, T> SubgraphInstance<C, T> @@ -37,6 +43,7 @@ where host_builder: T, host_metrics: Arc<HostMetrics>, offchain_monitor: &mut OffchainMonitor, + causality_region_seq: CausalityRegionSeq, ) -> Result<Self, Error> { let subgraph_id = manifest.id.clone(); let network = manifest.network_name(); @@ -50,6 +57,7 @@ where module_cache: HashMap::new(), templates, host_metrics, + causality_region_seq, }; // Create a new runtime host for each data source in the subgraph manifest; @@ -67,7 +75,10 @@ where }; if let DataSource::Offchain(ds) = &ds { - offchain_monitor.add_source(&ds.source)?; + // Monitor the data source only if it has not been processed yet. + if !ds.is_processed() { + offchain_monitor.add_source(ds.source.clone())?; + } } let host = this.new_host(logger.cheap_clone(), ds, module_bytes)?; @@ -116,13 +127,11 @@ where data_source: DataSource<C>, ) -> Result<Option<Arc<T::Host>>, Error> { // Protect against creating more than the allowed maximum number of data sources - if let Some(max_data_sources) = ENV_VARS.subgraph_max_data_sources { - if self.hosts.len() >= max_data_sources { - anyhow::bail!( - "Limit of {} data sources per subgraph exceeded", - max_data_sources, - ); - } + if self.hosts.len() >= ENV_VARS.subgraph_max_data_sources { + anyhow::bail!( + "Limit of {} data sources per subgraph exceeded", + ENV_VARS.subgraph_max_data_sources, + ); } // `hosts` will remain ordered by the creation block. @@ -147,7 +156,42 @@ where }) } - pub(super) fn revert_data_sources(&mut self, reverted_block: BlockNumber) { + /// Reverts any DataSources that have been added from the given block forwards (inclusively). + /// It also resets the done_at status of any data source marked 'done' on this block or later. + /// Only the offchain::Source is returned because we don't currently need to know which + /// DataSources were removed; the source is enough to find the offchain DDS again. + pub(super) fn revert_data_sources( + &mut self, + reverted_block: BlockNumber, + ) -> Vec<offchain::Source> { + self.revert_hosts_cheap(reverted_block); + + // The following code handles resetting offchain data sources, so in most + // cases this is enough processing. + // At some point we will probably need to improve the linear search, but for now it + // should be fine. + // + // Any File DataSources (Dynamic Data Sources) will have their own causality region, + // which currently is the next number of the sequence, but that should be an internal detail. + // Regardless of the sequence logic, if the current causality region is ONCHAIN then there are + // no others, so the remaining code is a no-op and we can stop here. + if self.causality_region_seq.0 == CausalityRegion::ONCHAIN { + return vec![]; + } + + self.hosts + .iter() + .filter(|host| matches!(host.done_at(), Some(done_at) if done_at >= reverted_block)) + .map(|host| { + host.set_done_at(None); + // Safe to call unwrap() because only offchain DataSources have done_at = Some + host.data_source().as_offchain().unwrap().source.clone() + }) + .collect() + } + + /// Because hosts are ordered, removing them based on creation block is cheap and simple. + fn revert_hosts_cheap(&mut self, reverted_block: BlockNumber) { // `hosts` is ordered by the creation block. // See also 8f1bca33-d3b7-4035-affc-fd6161a12448. 
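+ // Hosts created at or after the reverted block sit at the back of the list, so they are simply popped off here.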
while self @@ -160,7 +204,11 @@ where } } - pub(super) fn hosts(&self) -> &[Arc] { + pub fn hosts(&self) -> &[Arc] { &self.hosts } + + pub(super) fn causality_region_next_value(&mut self) -> CausalityRegion { + self.causality_region_seq.next_val() + } } diff --git a/core/src/subgraph/error.rs b/core/src/subgraph/error.rs index a8528fd81f7..b3131255aed 100644 --- a/core/src/subgraph/error.rs +++ b/core/src/subgraph/error.rs @@ -4,7 +4,7 @@ use graph::prelude::{thiserror, Error, StoreError}; #[derive(thiserror::Error, Debug)] pub enum BlockProcessingError { #[error("{0:#}")] - Unknown(Error), + Unknown(#[from] Error), // The error had a deterministic cause but, for a possibly non-deterministic reason, we chose to // halt processing due to the error. @@ -21,12 +21,6 @@ impl BlockProcessingError { } } -impl From for BlockProcessingError { - fn from(e: Error) -> Self { - BlockProcessingError::Unknown(e) - } -} - impl From for BlockProcessingError { fn from(e: StoreError) -> Self { BlockProcessingError::Unknown(e.into()) diff --git a/core/src/subgraph/instance_manager.rs b/core/src/subgraph/instance_manager.rs index 6085c4841e7..da171447748 100644 --- a/core/src/subgraph/instance_manager.rs +++ b/core/src/subgraph/instance_manager.rs @@ -1,14 +1,17 @@ -use crate::polling_monitor::ipfs_service::IpfsService; +use crate::polling_monitor::IpfsService; use crate::subgraph::context::{IndexingContext, SharedInstanceKeepAliveMap}; use crate::subgraph::inputs::IndexingInputs; use crate::subgraph::loader::load_dynamic_data_sources; + use crate::subgraph::runner::SubgraphRunner; use graph::blockchain::block_stream::BlockStreamMetrics; use graph::blockchain::Blockchain; use graph::blockchain::NodeCapabilities; use graph::blockchain::{BlockchainKind, TriggerFilter}; use graph::components::subgraph::ProofOfIndexingVersion; -use graph::data::subgraph::SPEC_VERSION_0_0_6; +use graph::data::subgraph::{UnresolvedSubgraphManifest, SPEC_VERSION_0_0_6}; +use graph::data_source::causality_region::CausalityRegionSeq; +use graph::env::EnvVars; use graph::prelude::{SubgraphInstanceManager as SubgraphInstanceManagerTrait, *}; use graph::{blockchain::BlockchainMap, components::store::DeploymentLocator}; use graph_runtime_wasm::module::ToAscPtr; @@ -18,16 +21,18 @@ use tokio::task; use super::context::OffchainMonitor; use super::SubgraphTriggerProcessor; +#[derive(Clone)] pub struct SubgraphInstanceManager { logger_factory: LoggerFactory, subgraph_store: Arc, chains: Arc, metrics_registry: Arc, - manager_metrics: SubgraphInstanceManagerMetrics, + manager_metrics: Arc, instances: SharedInstanceKeepAliveMap, link_resolver: Arc, ipfs_service: IpfsService, static_filters: bool, + env_vars: Arc, } #[async_trait] @@ -41,66 +46,83 @@ impl SubgraphInstanceManagerTrait for SubgraphInstanceManager< let logger = self.logger_factory.subgraph_logger(&loc); let err_logger = logger.clone(); let instance_manager = self.cheap_clone(); + let manager_metrics = instance_manager.manager_metrics.clone(); let subgraph_start_future = async move { match BlockchainKind::from_manifest(&manifest)? 
{ BlockchainKind::Arweave => { - instance_manager - .start_subgraph_inner::( - logger, - loc, + let runner = instance_manager + .build_subgraph_runner::( + logger.clone(), + self.env_vars.cheap_clone(), + loc.clone(), manifest, stop_block, Box::new(SubgraphTriggerProcessor {}), ) - .await + .await?; + + self.start_subgraph_inner(logger, loc, runner).await } BlockchainKind::Ethereum => { - instance_manager - .start_subgraph_inner::( - logger, - loc, + let runner = instance_manager + .build_subgraph_runner::( + logger.clone(), + self.env_vars.cheap_clone(), + loc.clone(), manifest, stop_block, Box::new(SubgraphTriggerProcessor {}), ) - .await + .await?; + + self.start_subgraph_inner(logger, loc, runner).await } BlockchainKind::Near => { - instance_manager - .start_subgraph_inner::( - logger, - loc, + let runner = instance_manager + .build_subgraph_runner::( + logger.clone(), + self.env_vars.cheap_clone(), + loc.clone(), manifest, stop_block, Box::new(SubgraphTriggerProcessor {}), ) - .await + .await?; + + self.start_subgraph_inner(logger, loc, runner).await } BlockchainKind::Cosmos => { - instance_manager - .start_subgraph_inner::( - logger, - loc, + let runner = instance_manager + .build_subgraph_runner::( + logger.clone(), + self.env_vars.cheap_clone(), + loc.clone(), manifest, stop_block, Box::new(SubgraphTriggerProcessor {}), ) - .await + .await?; + + self.start_subgraph_inner(logger, loc, runner).await } BlockchainKind::Substreams => { - instance_manager - .start_subgraph_inner::( - logger, + let runner = instance_manager + .build_subgraph_runner::( + logger.clone(), + self.env_vars.cheap_clone(), loc.cheap_clone(), manifest, stop_block, - Box::new(graph_chain_substreams::TriggerProcessor::new(loc)), + Box::new(graph_chain_substreams::TriggerProcessor::new(loc.clone())), ) - .await + .await?; + + self.start_subgraph_inner(logger, loc, runner).await } } }; + // Perform the actual work of starting the subgraph in a separate // task. If the subgraph is a graft or a copy, starting it will // perform the actual work of grafting/copying, which can take @@ -108,7 +130,7 @@ impl SubgraphInstanceManagerTrait for SubgraphInstanceManager< // manager does not hang because of that work. 
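+ // If this future fails, the error is logged below and only this one subgraph is affected; the instance manager itself keeps running.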
graph::spawn(async move { match subgraph_start_future.await { - Ok(()) => self.manager_metrics.subgraph_count.inc(), + Ok(()) => manager_metrics.subgraph_count.inc(), Err(err) => error!( err_logger, "Failed to start subgraph"; @@ -119,21 +141,30 @@ impl SubgraphInstanceManagerTrait for SubgraphInstanceManager< }); } - fn stop_subgraph(&self, loc: DeploymentLocator) { + async fn stop_subgraph(&self, loc: DeploymentLocator) { let logger = self.logger_factory.subgraph_logger(&loc); - info!(logger, "Stop subgraph"); + + match self.subgraph_store.stop_subgraph(&loc).await { + Ok(()) => debug!(logger, "Stopped subgraph writer"), + Err(err) => { + error!(logger, "Error stopping subgraph writer"; "error" => format!("{:#}", err)) + } + } // Drop the cancel guard to shut down the subgraph now let mut instances = self.instances.write().unwrap(); instances.remove(&loc.id); self.manager_metrics.subgraph_count.dec(); + + info!(logger, "Stopped subgraph"); } } impl SubgraphInstanceManager { pub fn new( logger_factory: &LoggerFactory, + env_vars: Arc, subgraph_store: Arc, chains: Arc, metrics_registry: Arc, @@ -148,75 +179,89 @@ impl SubgraphInstanceManager { logger_factory, subgraph_store, chains, - manager_metrics: SubgraphInstanceManagerMetrics::new(metrics_registry.cheap_clone()), + manager_metrics: Arc::new(SubgraphInstanceManagerMetrics::new( + metrics_registry.cheap_clone(), + )), metrics_registry, instances: SharedInstanceKeepAliveMap::default(), link_resolver, ipfs_service, static_filters, + env_vars, } } - async fn start_subgraph_inner( - self: Arc, + pub async fn build_subgraph_runner( + &self, logger: Logger, + env_vars: Arc, deployment: DeploymentLocator, manifest: serde_yaml::Mapping, stop_block: Option, tp: Box>>, - ) -> Result<(), Error> + ) -> anyhow::Result>> where + C: Blockchain, ::MappingTrigger: ToAscPtr, { let subgraph_store = self.subgraph_store.cheap_clone(); let registry = self.metrics_registry.cheap_clone(); + let store = self .subgraph_store .cheap_clone() .writable(logger.clone(), deployment.id) .await?; + let raw_yaml = serde_yaml::to_string(&manifest).unwrap(); + let manifest = UnresolvedSubgraphManifest::parse(deployment.hash.cheap_clone(), manifest)?; + + // Allow for infinite retries for subgraph definition files. + let link_resolver = Arc::from(self.link_resolver.with_retries()); + + // Make sure the `raw_yaml` is present on both this subgraph and the graft base. + self.subgraph_store + .set_manifest_raw_yaml(&deployment.hash, raw_yaml) + .await?; + if let Some(graft) = &manifest.graft { + if self.subgraph_store.is_deployed(&graft.base)? 
{ + let file_bytes = self + .link_resolver + .cat(&logger, &graft.base.to_ipfs_link()) + .await?; + let yaml = String::from_utf8(file_bytes)?; + + self.subgraph_store + .set_manifest_raw_yaml(&graft.base, yaml) + .await?; + } + } + + info!(logger, "Resolve subgraph files using IPFS"); + + let mut manifest = manifest + .resolve(&link_resolver, &logger, ENV_VARS.max_spec_version.clone()) + .await?; + + info!(logger, "Successfully resolved subgraph files using IPFS"); + + let manifest_idx_and_name: Vec<(u32, String)> = manifest.template_idx_and_name().collect(); + // Start the subgraph deployment before reading dynamic data // sources; if the subgraph is a graft or a copy, starting it will // do the copying and dynamic data sources won't show up until after // that is done store.start_subgraph_deployment(&logger).await?; - let (manifest, manifest_idx_and_name) = { - info!(logger, "Resolve subgraph files using IPFS"); - - let mut manifest = SubgraphManifest::resolve_from_raw( - deployment.hash.cheap_clone(), - manifest, - // Allow for infinite retries for subgraph definition files. - &Arc::from(self.link_resolver.with_retries()), - &logger, - ENV_VARS.max_spec_version.clone(), - ) - .await - .context("Failed to resolve subgraph from IPFS")?; - - // We cannot include static data sources in the map because a static data source and a - // template may have the same name in the manifest. - let ds_len = manifest.data_sources.len() as u32; - let manifest_idx_and_name: Vec<(u32, String)> = manifest - .templates - .iter() - .map(|t| t.name().to_owned()) - .enumerate() - .map(|(idx, name)| (ds_len + idx as u32, name)) - .collect(); - - let data_sources = load_dynamic_data_sources( - store.clone(), - logger.clone(), - &manifest, - manifest_idx_and_name.clone(), - ) - .await - .context("Failed to load dynamic data sources")?; - - info!(logger, "Successfully resolved subgraph files using IPFS"); + // Dynamic data sources are loaded by appending them to the manifest. + // + // Refactor: Preferably we'd avoid any mutation of the manifest. + let (manifest, static_data_sources) = { + let data_sources = load_dynamic_data_sources(store.clone(), logger.clone(), &manifest) + .await + .context("Failed to load dynamic data sources")?; + + let static_data_sources = manifest.data_sources.clone(); // Add dynamic data sources to the subgraph manifest.data_sources.extend(data_sources); @@ -227,9 +272,12 @@ impl<S: SubgraphStore> SubgraphInstanceManager<S> { manifest.data_sources.len() ); - (manifest, manifest_idx_and_name) + (manifest, static_data_sources) }; + let static_filters = + self.static_filters || manifest.data_sources.len() >= ENV_VARS.static_filters_threshold; + let onchain_data_sources = manifest .data_sources .iter() @@ -244,10 +292,20 @@ impl<S: SubgraphStore> SubgraphInstanceManager<S> { .with_context(|| format!("no chain configured for network {}", network))? .clone(); - // Obtain filters from the manifest - let mut filter = C::TriggerFilter::from_data_sources(onchain_data_sources.iter()); + // If static_filters is enabled, build a minimal filter from the static data sources and + // add the necessary filters based on templates. + // If it is not enabled, we just stick to the filter based on all the data sources. + // This specifically drops filters derived from dynamic data sources, because those can be + // rebuilt from templates, and it reduces the cost of egress traffic by keeping payloads smaller. 
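+ // Triggers for dynamic data sources still match under static filters because every template's filter is included below.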
+ let filter = if static_filters { + if !self.static_filters { + info!(logger, "forcing subgraph to use static filters.") + } + + let onchain_data_sources = static_data_sources.iter().filter_map(|d| d.as_onchain()); + + let mut filter = C::TriggerFilter::from_data_sources(onchain_data_sources); - if self.static_filters { filter.extend_with_template( manifest .templates @@ -255,7 +313,10 @@ impl SubgraphInstanceManager { .filter_map(|ds| ds.as_onchain()) .cloned(), ); - } + filter + } else { + C::TriggerFilter::from_data_sources(onchain_data_sources.iter()) + }; let start_blocks = manifest.start_blocks(); @@ -282,16 +343,18 @@ impl SubgraphInstanceManager { &deployment, &required_capabilities, e))?.clone(); - let subgraph_metrics = Arc::new(SubgraphInstanceMetrics::new( + let host_metrics = Arc::new(HostMetrics::new( registry.cheap_clone(), deployment.hash.as_str(), + stopwatch_metrics.clone(), )); - let subgraph_metrics_unregister = subgraph_metrics.clone(); - let host_metrics = Arc::new(HostMetrics::new( + + let subgraph_metrics = Arc::new(SubgraphInstanceMetrics::new( registry.cheap_clone(), deployment.hash.as_str(), stopwatch_metrics.clone(), )); + let block_stream_metrics = Arc::new(BlockStreamMetrics::new( registry.cheap_clone(), &deployment.hash, @@ -304,7 +367,7 @@ impl SubgraphInstanceManager { logger.cheap_clone(), registry.cheap_clone(), &manifest.id, - self.ipfs_service.cheap_clone(), + self.ipfs_service.clone(), ); // Initialize deployment_head with current deployment head. Any sort of trouble in @@ -326,12 +389,16 @@ impl SubgraphInstanceManager { ProofOfIndexingVersion::Legacy }; + let causality_region_seq = + CausalityRegionSeq::from_current(store.causality_region_curr_val().await?); + let instance = super::context::instance::SubgraphInstance::from_manifest( &logger, manifest, host_builder, host_metrics.clone(), &mut offchain_monitor, + causality_region_seq, )?; let inputs = IndexingInputs { @@ -345,7 +412,7 @@ impl SubgraphInstanceManager { chain, templates, unified_api_version, - static_filters: self.static_filters, + static_filters, manifest_idx_and_name, poi_version, network, @@ -366,6 +433,27 @@ impl SubgraphInstanceManager { stream: block_stream_metrics, }; + Ok(SubgraphRunner::new( + inputs, + ctx, + logger.cheap_clone(), + metrics, + env_vars, + )) + } + + async fn start_subgraph_inner( + &self, + logger: Logger, + deployment: DeploymentLocator, + runner: SubgraphRunner>, + ) -> Result<(), Error> + where + ::MappingTrigger: ToAscPtr, + { + let registry = self.metrics_registry.cheap_clone(); + let subgraph_metrics_unregister = runner.metrics.subgraph.cheap_clone(); + // Keep restarting the subgraph until it terminates. The subgraph // will usually only run once, but is restarted whenever a block // creates dynamic data sources. This allows us to recreate the @@ -380,7 +468,6 @@ impl SubgraphInstanceManager { // it has a dedicated OS thread so the OS will handle the preemption. See // https://github.com/tokio-rs/tokio/issues/3493. 
graph::spawn_thread(deployment.to_string(), move || { - let runner = SubgraphRunner::new(inputs, ctx, logger.cheap_clone(), metrics); if let Err(e) = graph::block_on(task::unconstrained(runner.run())) { error!( &logger, diff --git a/core/src/subgraph/loader.rs b/core/src/subgraph/loader.rs index 3e5931984db..797219f9502 100644 --- a/core/src/subgraph/loader.rs +++ b/core/src/subgraph/loader.rs @@ -9,8 +9,8 @@ pub async fn load_dynamic_data_sources( store: Arc, logger: Logger, manifest: &SubgraphManifest, - manifest_idx_and_name: Vec<(u32, String)>, ) -> Result>, Error> { + let manifest_idx_and_name = manifest.template_idx_and_name().collect(); let start_time = Instant::now(); let mut data_sources: Vec> = vec![]; @@ -25,7 +25,7 @@ pub async fn load_dynamic_data_sources( .find(|template| template.manifest_idx() == stored.manifest_idx) .ok_or_else(|| anyhow!("no template with idx `{}` was found", stored.manifest_idx))?; - let ds = DataSource::from_stored_dynamic_data_source(&template, stored)?; + let ds = DataSource::from_stored_dynamic_data_source(template, stored)?; // The data sources are ordered by the creation block. // See also 8f1bca33-d3b7-4035-affc-fd6161a12448. diff --git a/core/src/subgraph/mod.rs b/core/src/subgraph/mod.rs index 490c45f791c..45f8d5b98ef 100644 --- a/core/src/subgraph/mod.rs +++ b/core/src/subgraph/mod.rs @@ -13,4 +13,5 @@ mod trigger_processor; pub use self::instance_manager::SubgraphInstanceManager; pub use self::provider::SubgraphAssignmentProvider; pub use self::registrar::SubgraphRegistrar; +pub use self::runner::SubgraphRunner; pub use self::trigger_processor::*; diff --git a/core/src/subgraph/provider.rs b/core/src/subgraph/provider.rs index c513cb3144e..4d3a0cab51d 100644 --- a/core/src/subgraph/provider.rs +++ b/core/src/subgraph/provider.rs @@ -81,10 +81,8 @@ impl SubgraphAssignmentProviderTrait for SubgraphAss .remove(&deployment.id) { // Shut down subgraph processing - self.instance_manager.stop_subgraph(deployment); - Ok(()) - } else { - Err(SubgraphAssignmentProviderError::NotRunning(deployment)) + self.instance_manager.stop_subgraph(deployment).await; } + Ok(()) } } diff --git a/core/src/subgraph/registrar.rs b/core/src/subgraph/registrar.rs index 8c4a2156a3e..3ca52d98bef 100644 --- a/core/src/subgraph/registrar.rs +++ b/core/src/subgraph/registrar.rs @@ -88,8 +88,7 @@ where // - The event stream sees a Remove event for subgraph B, but the table query finds that // subgraph B has already been removed. // The `handle_assignment_events` function handles these cases by ignoring AlreadyRunning - // (on subgraph start) or NotRunning (on subgraph stop) error types, which makes the - // operations idempotent. + // (on subgraph start) which makes the operations idempotent. Subgraph stop is already idempotent. 
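Since stopping is now handled without a `NotRunning` error, the operation is idempotent by construction. A minimal standalone sketch of that idea, using simplified stand-ins (the `Instances` map and `CancelGuard` below are illustrative assumptions, not graph-node's actual types):

```rust
use std::collections::HashMap;
use std::sync::{Arc, RwLock};

// Simplified stand-ins for illustration only.
type DeploymentId = i32;
struct CancelGuard; // dropping this would shut the running subgraph down

#[derive(Clone, Default)]
struct Instances(Arc<RwLock<HashMap<DeploymentId, CancelGuard>>>);

impl Instances {
    /// Idempotent stop: removing an entry that is not present is a no-op,
    /// so callers never need to handle a `NotRunning`-style error.
    fn stop(&self, id: DeploymentId) {
        let mut map = self.0.write().unwrap();
        map.remove(&id); // drops the guard, if any, cancelling the instance
    }
}

fn main() {
    let instances = Instances::default();
    instances.stop(42); // not running: simply a no-op
    instances.stop(42); // safe to repeat
}
```

Repeated `Remove` events from the assignment stream therefore collapse into harmless no-ops.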
// Start event stream let assignment_event_stream = self.assignment_events(); @@ -446,13 +445,15 @@ async fn handle_assignment_event( AssignmentEvent::Add { deployment, node_id: _, - } => Ok(start_subgraph(deployment, provider.clone(), logger).await), + } => { + start_subgraph(deployment, provider.clone(), logger).await; + Ok(()) + } AssignmentEvent::Remove { deployment, node_id: _, } => match provider.stop(deployment).await { Ok(()) => Ok(()), - Err(SubgraphAssignmentProviderError::NotRunning(_)) => Ok(()), Err(e) => Err(CancelableError::Error(e)), }, } @@ -552,11 +553,12 @@ async fn create_subgraph_version( version_switching_mode: SubgraphVersionSwitchingMode, resolver: &Arc, ) -> Result { + let raw_string = serde_yaml::to_string(&raw).unwrap(); let unvalidated = UnvalidatedSubgraphManifest::::resolve( deployment, raw, - &resolver, - &logger, + resolver, + logger, ENV_VARS.max_spec_version.clone(), ) .map_err(SubgraphRegistrarError::ResolveError) @@ -598,7 +600,7 @@ async fn create_subgraph_version( graft.base.clone(), match graft_block_override { Some(block) => block, - None => resolve_graft_block(&graft, &*chain, &logger).await?, + None => resolve_graft_block(graft, &*chain, &logger).await?, }, )), }; @@ -616,11 +618,30 @@ async fn create_subgraph_version( "block" => format!("{:?}", base_block.as_ref().map(|(_,ptr)| ptr.number)) ); + // Entity types that may be touched by offchain data sources need a causality region column. + let needs_causality_region = manifest + .data_sources + .iter() + .filter_map(|ds| ds.as_offchain()) + .map(|ds| ds.mapping.entities.iter()) + .chain( + manifest + .templates + .iter() + .filter_map(|ds| ds.as_offchain()) + .map(|ds| ds.mapping.entities.iter()), + ) + .flatten() + .cloned() + .collect(); + // Apply the subgraph versioning and deployment operations, // creating a new subgraph deployment if one doesn't exist. 
- let deployment = DeploymentCreate::new(&manifest, start_block) + let deployment = DeploymentCreate::new(raw_string, &manifest, start_block) .graft(base_block) - .debug(debug_fork); + .debug(debug_fork) + .entities_with_causality_region(needs_causality_region); + deployment_store .create_subgraph_deployment( name, @@ -630,5 +651,5 @@ network_name, version_switching_mode, ) - .map_err(|e| SubgraphRegistrarError::SubgraphDeploymentError(e)) + .map_err(SubgraphRegistrarError::SubgraphDeploymentError) } diff --git a/core/src/subgraph/runner.rs b/core/src/subgraph/runner.rs index 9ae886ce805..515fa53c066 100644 --- a/core/src/subgraph/runner.rs +++ b/core/src/subgraph/runner.rs @@ -5,21 +5,23 @@ use crate::subgraph::state::IndexingState; use crate::subgraph::stream::new_block_stream; use atomic_refcell::AtomicRefCell; use graph::blockchain::block_stream::{BlockStreamEvent, BlockWithTriggers, FirehoseCursor}; -use graph::blockchain::{Block, Blockchain, TriggerFilter as _}; +use graph::blockchain::{Block, Blockchain, DataSource as _, TriggerFilter as _}; use graph::components::store::{EmptyStore, EntityKey, StoredDynamicDataSource}; use graph::components::{ store::ModificationsAndCache, - subgraph::{CausalityRegion, MappingError, ProofOfIndexing, SharedProofOfIndexing}, + subgraph::{MappingError, PoICausalityRegion, ProofOfIndexing, SharedProofOfIndexing}, }; use graph::data::store::scalar::Bytes; use graph::data::subgraph::{ schema::{SubgraphError, SubgraphHealth, POI_OBJECT}, SubgraphFeature, }; -use graph::data_source::{offchain, DataSource, TriggerData}; +use graph::data_source::{ + offchain, CausalityRegion, DataSource, DataSourceCreationError, DataSourceTemplate, TriggerData, +}; +use graph::env::EnvVars; use graph::prelude::*; use graph::util::{backoff::ExponentialBackoff, lfu_cache::LfuCache}; -use std::convert::TryFrom; use std::sync::Arc; use std::time::{Duration, Instant}; @@ -27,7 +29,7 @@ const MINUTE: Duration = Duration::from_secs(60); const SKIP_PTR_UPDATES_THRESHOLD: Duration = Duration::from_secs(60 * 5); -pub(crate) struct SubgraphRunner +pub struct SubgraphRunner where C: Blockchain, T: RuntimeHostBuilder, @@ -36,7 +38,7 @@ where state: IndexingState, inputs: Arc>, logger: Logger, - metrics: RunnerMetrics, + pub metrics: RunnerMetrics, } impl SubgraphRunner where @@ -49,6 +51,7 @@ ctx: IndexingContext, logger: Logger, metrics: RunnerMetrics, + env_vars: Arc, ) -> Self { Self { inputs: Arc::new(inputs), @@ -57,7 +60,10 @@ should_try_unfail_non_deterministic: true, synced: false, skip_ptr_updates_timer: Instant::now(), - backoff: ExponentialBackoff::new(MINUTE * 2, ENV_VARS.subgraph_error_retry_ceil), + backoff: ExponentialBackoff::new( + (MINUTE * 2).min(env_vars.subgraph_error_retry_ceil), + env_vars.subgraph_error_retry_ceil, + ), entity_lfu_cache: LfuCache::new(), }, logger, @@ -65,7 +71,40 @@ } } - pub async fn run(mut self) -> Result<(), Error> { + /// Revert the state to a previous block. When handling revert operations + /// or failed block processing, it is necessary to remove part of the existing + /// in-memory state to keep it consistent with DB changes. + /// During block processing new dynamic data sources are added directly to the + /// SubgraphInstance of the runner. This means that if, for whatever reason, + /// the changes don't complete then the remnants of that block processing must + /// be removed. The same thing also applies to the block cache.
+ /// This function must be called before continuing to process in order to avoid + /// duplicated host insertion and POI issues with dirty entity changes. + fn revert_state(&mut self, block_number: BlockNumber) -> Result<(), Error> { + self.state.entity_lfu_cache = LfuCache::new(); + + // 1. Revert all hosts (created by DDS) up to block_number inclusively. + // 2. Unmark any offchain data sources that were marked done on the blocks being removed. + // When no offchain data sources are present, 2. should be a no-op. + self.ctx.revert_data_sources(block_number)?; + Ok(()) + } + + #[cfg(debug_assertions)] + pub fn context(&self) -> &IndexingContext { + &self.ctx + } + + #[cfg(debug_assertions)] + pub async fn run_for_test(self, break_on_restart: bool) -> Result { + self.run_inner(break_on_restart).await + } + + pub async fn run(self) -> Result { + self.run_inner(false).await + } + + async fn run_inner(mut self, break_on_restart: bool) -> Result { // If a subgraph failed for deterministic reasons, before start indexing, we first // revert the deployment head. It should lead to the same result since the error was // deterministic. @@ -96,10 +135,11 @@ let block_stream_canceler = CancelGuard::new(); let block_stream_cancel_handle = block_stream_canceler.handle(); - let mut block_stream = new_block_stream(&self.inputs, &self.ctx.filter) - .await? - .map_err(CancelableError::Error) - .cancelable(&block_stream_canceler, || Err(CancelableError::Cancel)); + let mut block_stream = + new_block_stream(&self.inputs, &self.ctx.filter, &self.metrics.subgraph) + .await? + .map_err(CancelableError::Error) + .cancelable(&block_stream_canceler, || Err(CancelableError::Cancel)); // Keep the stream's cancel guard around to be able to shut it down when the subgraph // deployment is unassigned @@ -129,7 +169,12 @@ Action::Stop => { info!(self.logger, "Stopping subgraph"); self.inputs.store.flush().await?; - return Ok(()); + return Ok(self); + } + Action::Restart if break_on_restart => { + info!(self.logger, "Stopping subgraph on break"); + self.inputs.store.flush().await?; + return Ok(self); } Action::Restart => break, }; @@ -174,7 +219,7 @@ }; // Causality region for onchain triggers. - let causality_region = CausalityRegion::from_network(&self.inputs.network); + let causality_region = PoICausalityRegion::from_network(&self.inputs.network); // Process events one after the other, passing in entity operations // collected previously to every new event being processed @@ -230,6 +275,17 @@ data_sources.iter().filter_map(DataSource::as_onchain), ); + let block: Arc = if self.inputs.chain.is_refetch_block_required() { + Arc::new( + self.inputs + .chain + .refetch_firehose_block(&logger, firehose_cursor.clone()) + .await?, + ) + } else { + block.cheap_clone() + }; + // Reprocess the triggers from this block that match the new data sources let block_with_triggers = self .inputs @@ -254,7 +310,7 @@ // Add entity operations for the new data sources to the block state // and add runtimes for the data sources to the subgraph instance. - self.persist_dynamic_data_sources(&mut block_state.entity_cache, data_sources); + self.persist_dynamic_data_sources(&mut block_state, data_sources); // Process the triggers in each host in the same order the // corresponding data sources have been created.
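To make the cleanup contract of `revert_state` concrete, here is a minimal sketch under simplified assumptions; the `EntityCache` and host bookkeeping below are stand-ins for illustration, not graph-node's actual `IndexingState` and `IndexingContext`:

```rust
// Sketch: on a revert or a failed block, drop any in-memory state derived
// from the blocks being removed so it stays consistent with the database.
type BlockNumber = i32;

#[derive(Default)]
struct EntityCache; // stand-in for the entity LFU cache

#[derive(Default)]
struct Runner {
    entity_cache: EntityCache,
    // (created_at_block, template_name) pairs standing in for hosts
    // created by dynamic data sources
    hosts: Vec<(BlockNumber, String)>,
}

impl Runner {
    fn revert_state(&mut self, reverted_to: BlockNumber) {
        // The cache may hold entity versions written by the removed
        // blocks, so it is discarded wholesale.
        self.entity_cache = EntityCache::default();
        // Hosts created at or after the revert point must go; otherwise
        // re-running the block would insert duplicate hosts and skew PoI.
        self.hosts.retain(|(created_at, _)| *created_at < reverted_to);
    }
}

fn main() {
    let mut runner = Runner::default();
    runner.hosts.push((10, "template-a".to_string()));
    runner.hosts.push((20, "template-b".to_string()));
    runner.revert_state(15);
    assert_eq!(runner.hosts.len(), 1); // only the block-10 host survives
}
```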
@@ -317,7 +373,6 @@ where .start_section("as_modifications"); let ModificationsAndCache { modifications: mut mods, - data_sources, entity_lfu_cache: cache, } = block_state .entity_cache @@ -328,8 +383,9 @@ where // Check for offchain events and process them, including their entity modifications in the // set to be transacted. let offchain_events = self.ctx.offchain_monitor.ready_offchain_events()?; - let (offchain_mods, offchain_to_remove) = - self.handle_offchain_triggers(offchain_events).await?; + let (offchain_mods, processed_data_sources) = self + .handle_offchain_triggers(offchain_events, &block) + .await?; mods.extend(offchain_mods); // Put the cache back in the state, asserting that the placeholder cache was not used. @@ -342,7 +398,7 @@ where let err_count = block_state.deterministic_errors.len(); for (i, e) in block_state.deterministic_errors.iter().enumerate() { - let message = format!("{:#}", e).replace("\n", "\t"); + let message = format!("{:#}", e).replace('\n', "\t"); error!(&logger, "Subgraph error {}/{}", i + 1, err_count; "error" => message, "code" => LogCode::SubgraphSyncingFailure @@ -370,6 +426,7 @@ where let BlockState { deterministic_errors, + persisted_data_sources, .. } = block_state; @@ -381,10 +438,10 @@ where firehose_cursor, mods, &self.metrics.host.stopwatch, - data_sources, + persisted_data_sources, deterministic_errors, self.inputs.manifest_idx_and_name.clone(), - offchain_to_remove, + processed_data_sources, ) .await .context("Failed to transact block operations")?; @@ -471,7 +528,27 @@ where for info in created_data_sources { // Try to instantiate a data source from the template - let data_source = DataSource::try_from(info)?; + + let data_source = { + let res = match info.template { + DataSourceTemplate::Onchain(_) => C::DataSource::from_template_info(info) + .map(DataSource::Onchain) + .map_err(DataSourceCreationError::from), + DataSourceTemplate::Offchain(_) => offchain::DataSource::from_template_info( + info, + self.ctx.causality_region_next_value(), + ) + .map(DataSource::Offchain), + }; + match res { + Ok(ds) => ds, + Err(e @ DataSourceCreationError::Ignore(..)) => { + warn!(self.logger, "{}", e.to_string()); + continue; + } + Err(DataSourceCreationError::Unknown(e)) => return Err(e), + } + }; // Try to create a runtime host for the data source let host = self @@ -486,11 +563,11 @@ where None => { warn!( self.logger, - "no runtime hosted created, there is already a runtime host instantiated for \ + "no runtime host created, there is already a runtime host instantiated for \ this data source"; "name" => &data_source.name(), "address" => &data_source.address() - .map(|address| hex::encode(address)) + .map(hex::encode) .unwrap_or("none".to_string()), ) } @@ -502,7 +579,7 @@ where fn persist_dynamic_data_sources( &mut self, - entity_cache: &mut EntityCache, + block_state: &mut BlockState, data_sources: Vec>, ) { if !data_sources.is_empty() { @@ -520,9 +597,9 @@ where self.logger, "Persisting data_source"; "name" => &data_source.name(), - "address" => &data_source.address().map(|address| hex::encode(address)).unwrap_or("none".to_string()), + "address" => &data_source.address().map(hex::encode).unwrap_or("none".to_string()), ); - entity_cache.add_data_source(data_source); + block_state.persist_data_source(data_source.as_stored_dynamic_data_source()); } // Merge filters from data sources into the block stream builder @@ -564,9 +641,10 @@ where async fn handle_offchain_triggers( &mut self, triggers: Vec, + block: &Arc, ) -> Result<(Vec, Vec), Error> { let mut 
mods = vec![]; - let mut offchain_to_remove = vec![]; + let mut processed_data_sources = vec![]; for trigger in triggers { // Using an `EmptyStore` and clearing the cache for each trigger is a makeshift way to @@ -575,20 +653,19 @@ where let mut block_state = BlockState::::new(EmptyStore::new(schema), LfuCache::new()); // PoI ignores offchain events. + // See also: poi-ignores-offchain let proof_of_indexing = None; let causality_region = ""; - // We'll eventually need to do better here, but using an empty block works for now. - let block = Arc::default(); block_state = self .ctx .process_trigger( &self.logger, - &block, + block, &TriggerData::Offchain(trigger), block_state, &proof_of_indexing, - &causality_region, + causality_region, &self.inputs.debug_fork, &self.metrics.subgraph, ) @@ -607,11 +684,17 @@ where "Attempted to create data source in offchain data source handler. This is not yet supported.", ); + // This propagates any deterministic error as a non-deterministic one. Which might make + // sense considering offchain data sources are non-deterministic. + if let Some(err) = block_state.deterministic_errors.into_iter().next() { + return Err(anyhow!("{}", err.to_string())); + } + mods.extend(block_state.entity_cache.as_modifications()?.modifications); - offchain_to_remove.extend(block_state.offchain_to_remove); + processed_data_sources.extend(block_state.processed_data_sources); } - Ok((mods, offchain_to_remove)) + Ok((mods, processed_data_sources)) } } @@ -685,7 +768,7 @@ where let start = Instant::now(); - let res = self.process_block(&cancel_handle, block, cursor).await; + let res = self.process_block(cancel_handle, block, cursor).await; let elapsed = start.elapsed().as_secs_f64(); self.metrics @@ -763,18 +846,10 @@ where // Handle unexpected stream errors by marking the subgraph as failed. Err(e) => { - // Clear entity cache when a subgraph fails. - // - // This is done to be safe and sure that there's no state that's - // out of sync from the database. - // - // Without it, POI changes on failure would be kept in the entity cache - // and be transacted incorrectly in the next run. - self.state.entity_lfu_cache = LfuCache::new(); - self.metrics.stream.deployment_failed.set(1.0); + self.revert_state(block_ptr.block_number())?; - let message = format!("{:#}", e).replace("\n", "\t"); + let message = format!("{:#}", e).replace('\n', "\t"); let err = anyhow!("{}, code: {}", message, LogCode::SubgraphSyncingFailure); let deterministic = e.is_deterministic(); @@ -828,7 +903,7 @@ where .unwrap() .remove(&self.inputs.deployment.id); - let message = format!("{:#}", e).replace("\n", "\t"); + let message = format!("{:#}", e).replace('\n', "\t"); error!(self.logger, "Subgraph failed with non-deterministic error: {}", message; "attempt" => self.state.backoff.attempt, "retry_delay_s" => self.state.backoff.delay().as_secs()); @@ -884,11 +959,7 @@ where .deployment_head .set(subgraph_ptr.number as f64); - // Revert the in-memory state: - // - Revert any dynamic data sources. - // - Clear the entity cache. 
- self.ctx.revert_data_sources(subgraph_ptr.number); - self.state.entity_lfu_cache = LfuCache::new(); + self.revert_state(subgraph_ptr.number)?; Ok(Action::Continue) } @@ -928,7 +999,14 @@ async fn update_proof_of_indexing( // Create the special POI entity key specific to this causality_region let entity_key = EntityKey { entity_type: POI_OBJECT.to_owned(), + + // There are two things called causality regions here, one is the causality region for + // the poi which is a string and the PoI entity id. The other is the data source + // causality region to which the PoI belongs as an entity. Currently offchain events do + // not affect PoI so it is assumed to be `ONCHAIN`. + // See also: poi-ignores-offchain entity_id: causality_region.into(), + causality_region: CausalityRegion::ONCHAIN, }; // Grab the current digest attribute on this entity diff --git a/core/src/subgraph/stream.rs b/core/src/subgraph/stream.rs index 9733f0d206d..edbaac4ea65 100644 --- a/core/src/subgraph/stream.rs +++ b/core/src/subgraph/stream.rs @@ -1,7 +1,7 @@ use crate::subgraph::inputs::IndexingInputs; use graph::blockchain::block_stream::{BlockStream, BufferedBlockStream}; use graph::blockchain::Blockchain; -use graph::prelude::Error; +use graph::prelude::{Error, SubgraphInstanceMetrics}; use std::sync::Arc; const BUFFERED_BLOCK_STREAM_SIZE: usize = 100; @@ -10,6 +10,7 @@ const BUFFERED_FIREHOSE_STREAM_SIZE: usize = 1; pub async fn new_block_stream( inputs: &IndexingInputs, filter: &C::TriggerFilter, + metrics: &SubgraphInstanceMetrics, ) -> Result>, Error> { let is_firehose = inputs.chain.is_firehose_supported(); @@ -37,10 +38,13 @@ pub async fn new_block_stream( inputs.unified_api_version.clone(), ), } - .await?; + .await; + if is_firehose && block_stream.is_err() { + metrics.firehose_connection_errors.inc(); + } Ok(BufferedBlockStream::spawn_from_stream( - block_stream, + block_stream?, buffer_size, )) } diff --git a/core/src/subgraph/trigger_processor.rs b/core/src/subgraph/trigger_processor.rs index f8d3e78122b..2eeb8275500 100644 --- a/core/src/subgraph/trigger_processor.rs +++ b/core/src/subgraph/trigger_processor.rs @@ -1,9 +1,9 @@ use async_trait::async_trait; -use graph::blockchain::Blockchain; +use graph::blockchain::{Block, Blockchain}; use graph::cheap_clone::CheapClone; use graph::components::store::SubgraphFork; use graph::components::subgraph::{MappingError, SharedProofOfIndexing}; -use graph::data_source::TriggerData; +use graph::data_source::{MappingTrigger, TriggerData, TriggerWithHandler}; use graph::prelude::tokio::time::Instant; use graph::prelude::{ BlockState, RuntimeHost, RuntimeHostBuilder, SubgraphInstanceMetrics, TriggerProcessor, @@ -33,21 +33,35 @@ where ) -> Result, MappingError> { let error_count = state.deterministic_errors.len(); + let mut host_mapping: Vec<(&T::Host, TriggerWithHandler>)> = vec![]; + + { + let _section = subgraph_metrics.stopwatch.start_section("match_and_decode"); + + for host in hosts { + let mapping_trigger = match host.match_and_decode(trigger, block, logger)? { + // Trigger matches and was decoded as a mapping trigger. + Some(mapping_trigger) => mapping_trigger, + + // Trigger does not match, do not process it. 
+ None => continue, + }; + + host_mapping.push((host, mapping_trigger)); + } + } + + if host_mapping.is_empty() { + return Ok(state); + } + if let Some(proof_of_indexing) = proof_of_indexing { proof_of_indexing .borrow_mut() .start_handler(causality_region); } - for host in hosts { - let mapping_trigger = match host.match_and_decode(trigger, block, logger)? { - // Trigger matches and was decoded as a mapping trigger. - Some(mapping_trigger) => mapping_trigger, - - // Trigger does not match, do not process it. - None => continue, - }; - + for (host, mapping_trigger) in host_mapping { let start = Instant::now(); state = host .process_mapping_trigger( @@ -62,11 +76,12 @@ where let elapsed = start.elapsed().as_secs_f64(); subgraph_metrics.observe_trigger_processing_duration(elapsed); - if host.data_source().as_offchain().is_some() { + if let Some(ds) = host.data_source().as_offchain() { + ds.mark_processed_at(block.number()); // Remove this offchain data source since it has just been processed. state - .offchain_to_remove - .push(host.data_source().as_stored_dynamic_data_source()); + .processed_data_sources + .push(ds.as_stored_dynamic_data_source()); } } @@ -78,7 +93,7 @@ where // ProofOfIndexingEvent::DeterministicError to the SharedProofOfIndexing. proof_of_indexing .borrow_mut() - .write_deterministic_error(&logger, causality_region); + .write_deterministic_error(logger, causality_region); } } diff --git a/core/tests/fixtures/ipfs_folder/hello.txt b/core/tests/fixtures/ipfs_folder/hello.txt new file mode 100644 index 00000000000..3b18e512dba --- /dev/null +++ b/core/tests/fixtures/ipfs_folder/hello.txt @@ -0,0 +1 @@ +hello world diff --git a/core/tests/interfaces.rs b/core/tests/interfaces.rs index 45f81099424..2c5916ea853 100644 --- a/core/tests/interfaces.rs +++ b/core/tests/interfaces.rs @@ -25,7 +25,7 @@ async fn insert_and_query( let document = graphql_parser::parse_query(query).unwrap().into_static(); let target = QueryTarget::Deployment(subgraph_id, Default::default()); - let query = Query::new(document, None); + let query = Query::new(document, None, false); Ok(execute_subgraph_query(query, target) .await .first() @@ -1481,12 +1481,12 @@ async fn derived_interface_bytes() { id: Bytes!, trades: [Trade!]! @derivedFrom(field: "pool") } - + interface Trade { id: Bytes! pool: Pool! } - + type Sell implements Trade @entity { id: Bytes! pool: Pool! diff --git a/docker/Dockerfile b/docker/Dockerfile index 16a38350702..8c0a8e19919 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -4,7 +4,16 @@ # by running something like the following # docker build --target STAGE -f docker/Dockerfile . -FROM rust:buster as graph-node-build +FROM golang:bullseye as envsubst + +# v1.2.0 +ARG ENVSUBST_COMMIT_SHA=16035fe3571ad42c7796bf554f978bb2df64231b +# We ship `envsubst` with the final image to facilitate env. var. templating in +# the configuration file. +RUN go install github.com/a8m/envsubst/cmd/envsubst@$ENVSUBST_COMMIT_SHA \ + && strip -g /go/bin/envsubst + +FROM rust:bullseye as graph-node-build ARG COMMIT_SHA=unknown ARG REPO_NAME=unknown @@ -13,22 +22,26 @@ ARG TAG_NAME=unknown ADD . 
/graph-node -RUN cd /graph-node \ - && apt-get update && apt-get install -y cmake \ - && rustup component add rustfmt \ - && RUSTFLAGS="-g" cargo install --locked --path node \ +RUN apt-get update \ + && apt-get install -y cmake protobuf-compiler && \ + cd /graph-node && \ + RUSTFLAGS="-g" cargo build --release --package graph-node \ + && cp target/release/graph-node /usr/local/bin/graph-node \ + && cp target/release/graphman /usr/local/bin/graphman \ + # Reduce the size of the layer by removing unnecessary files. && cargo clean \ - && objcopy --only-keep-debug /usr/local/cargo/bin/graph-node /usr/local/cargo/bin/graph-node.debug \ - && strip -g /usr/local/cargo/bin/graph-node \ - && strip -g /usr/local/cargo/bin/graphman \ - && cd /usr/local/cargo/bin \ + && objcopy --only-keep-debug /usr/local/bin/graph-node /usr/local/bin/graph-node.debug \ + && strip -g /usr/local/bin/graph-node \ + && strip -g /usr/local/bin/graphman \ + && cd /usr/local/bin \ && objcopy --add-gnu-debuglink=graph-node.debug graph-node \ && echo "REPO_NAME='$REPO_NAME'" > /etc/image-info \ && echo "TAG_NAME='$TAG_NAME'" >> /etc/image-info \ && echo "BRANCH_NAME='$BRANCH_NAME'" >> /etc/image-info \ && echo "COMMIT_SHA='$COMMIT_SHA'" >> /etc/image-info \ && echo "CARGO_VERSION='$(cargo --version)'" >> /etc/image-info \ - && echo "RUST_VERSION='$(rustc --version)'" >> /etc/image-info + && echo "RUST_VERSION='$(rustc --version)'" >> /etc/image-info \ + && echo "CARGO_DEV_BUILD='$CARGO_DEV_BUILD'" >> /etc/image-info # Debug image to access core dumps FROM graph-node-build as graph-node-debug @@ -39,7 +52,7 @@ COPY docker/Dockerfile /Dockerfile COPY docker/bin/* /usr/local/bin/ # The graph-node runtime image with only the executable -FROM debian:buster-slim as graph-node +FROM debian:bullseye-slim as graph-node ENV RUST_LOG "" ENV GRAPH_LOG "" ENV EARLY_LOG_CHUNK_SIZE "" @@ -86,7 +99,8 @@ RUN apt-get update \ && apt-get install -y libpq-dev ca-certificates netcat ADD docker/wait_for docker/start /usr/local/bin/ -COPY --from=graph-node-build /usr/local/cargo/bin/graph-node /usr/local/cargo/bin/graphman /usr/local/bin/ +COPY --from=graph-node-build /usr/local/bin/graph-node /usr/local/bin/graphman /usr/local/bin/ COPY --from=graph-node-build /etc/image-info /etc/image-info +COPY --from=envsubst /go/bin/envsubst /usr/local/bin/ COPY docker/Dockerfile /Dockerfile -CMD start +CMD ["start"] diff --git a/docker/README.md b/docker/README.md index 743a6129f77..326a3535e9f 100644 --- a/docker/README.md +++ b/docker/README.md @@ -37,23 +37,6 @@ to connect to. By default, it will use `mainnet:http://host.docker.internal:8545 in order to connect to an Ethereum node running on your host machine. You can replace this with anything else in `docker-compose.yaml`. -> **Note for Linux users:** On Linux, `host.docker.internal` is not -> currently supported. Instead, you will have to replace it with the -> IP address of your Docker host (from the perspective of the Graph -> Node container). -> To do this, run: -> -> ``` -> CONTAINER_ID=$(docker container ls | grep graph-node | cut -d' ' -f1) -> docker exec $CONTAINER_ID /bin/bash -c 'apt install -y iproute2 && ip route' | awk '/^default via /{print $3}' -> ``` -> -> This will print the host's IP address. Then, put it into `docker-compose.yml`: -> -> ``` -> sed -i -e 's/host.docker.internal//g' docker-compose.yml -> ``` - After you have set up an Ethereum node—e.g. 
Ganache or Parity—simply clone this repository and run diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml index 60c188b71d9..742de12649d 100644 --- a/docker/docker-compose.yml +++ b/docker/docker-compose.yml @@ -40,6 +40,10 @@ services: POSTGRES_USER: graph-node POSTGRES_PASSWORD: let-me-in POSTGRES_DB: graph-node - PGDATA: "/data/postgres" + # FIXME: remove this env. var. which we shouldn't need. Introduced by + # , maybe as a + # workaround for https://github.com/docker/for-mac/issues/6270? + PGDATA: "/var/lib/postgresql/data" + POSTGRES_INITDB_ARGS: "-E UTF8 --locale=C" volumes: - ./data/postgres:/var/lib/postgresql/data diff --git a/docker/hooks/post_checkout b/docker/hooks/post_checkout deleted file mode 100755 index f1b6f189ba1..00000000000 --- a/docker/hooks/post_checkout +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash - -set -e -set -x - -echo "Setting SOURCE_BRANCH to ${SOURCE_BRANCH}" - -sed -i "s@^ENV SOURCE_BRANCH \"master\"@ENV SOURCE_BRANCH \"${SOURCE_BRANCH}\"@g" Dockerfile diff --git a/docker/start b/docker/start index 0c003ccefd1..bbeabd166a3 100755 --- a/docker/start +++ b/docker/start @@ -32,12 +32,12 @@ save_coredumps() { wait_for_ipfs() { # Take the IPFS URL in $1 apart and extract host and port. If no explicit - # host is given, use 443 for https, and 80 otherwise - if [[ "$1" =~ ^((https?)://)?([^:/]+)(:([0-9]+))? ]] + # port is given, use 443 for https, and 80 otherwise + if [[ "$1" =~ ^((https?)://)?((.*)@)?([^:/]+)(:([0-9]+))? ]] then proto=${BASH_REMATCH[2]:-http} - host=${BASH_REMATCH[3]} - port=${BASH_REMATCH[5]} + host=${BASH_REMATCH[5]} + port=${BASH_REMATCH[7]} if [ -z "$port" ] then [ "$proto" = "https" ] && port=443 || port=80 @@ -80,24 +80,29 @@ run_graph_node() { } start_query_node() { + # Query nodes are never the block ingestor export DISABLE_BLOCK_INGESTOR=true run_graph_node } start_index_node() { - # Only the index node with the name set in BLOCK_INGESTOR should ingest - # blocks - if [[ ${node_id} != "${BLOCK_INGESTOR}" ]]; then - export DISABLE_BLOCK_INGESTOR=true - fi - run_graph_node } start_combined_node() { + # No valid reason to disable the block ingestor in this case. + unset DISABLE_BLOCK_INGESTOR run_graph_node } +# Only the index node with the name set in BLOCK_INGESTOR should ingest +# blocks. For historical reasons, that name is set to the unmangled version +# of `node_id` and we need to check whether we are the block ingestor +# before possibly mangling the node_id. +if [[ ${node_id} != "${BLOCK_INGESTOR}" ]]; then + export DISABLE_BLOCK_INGESTOR=true +fi + # Allow operators to opt out of legacy character # restrictions on the node ID by setting enablement # variable to a non-zero length string: diff --git a/docs/config.md b/docs/config.md index 8b65e2511a3..8020334918c 100644 --- a/docs/config.md +++ b/docs/config.md @@ -11,6 +11,11 @@ The TOML file consists of four sections: * `[ingestor]` sets the name of the node responsible for block ingestion. * `[deployment]` describes how to place newly deployed subgraphs. +Some of these sections support environment variable expansion out of the box, +most notably Postgres connection strings. The official `graph-node` Docker image +includes [`envsubst`](https://github.com/a8m/envsubst) for more complex use +cases. 
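To make the templating concrete, here is a toy sketch of `${VAR}`-style expansion as it could be applied to a connection string before the TOML is parsed; this illustrates the idea only and is neither graph-node's nor `envsubst`'s actual implementation:

```rust
use std::env;

// Expand `${VAR}` references from the environment; unknown variables
// expand to the empty string in this toy version.
fn expand(input: &str) -> String {
    let mut out = String::new();
    let mut rest = input;
    while let Some(start) = rest.find("${") {
        out.push_str(&rest[..start]);
        let tail = &rest[start + 2..];
        match tail.find('}') {
            Some(end) => {
                out.push_str(&env::var(&tail[..end]).unwrap_or_default());
                rest = &tail[end + 1..];
            }
            None => {
                // No closing brace: keep the text verbatim.
                out.push_str(&rest[start..]);
                rest = "";
            }
        }
    }
    out.push_str(rest);
    out
}

fn main() {
    // With PGPASSWORD=let-me-in in the environment, this prints
    // postgresql://graph:let-me-in@localhost/graph-node
    println!(
        "{}",
        expand("postgresql://graph:${PGPASSWORD}@localhost/graph-node")
    );
}
```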
+ ## Configuring Multiple Databases For most use cases, a single Postgres database is sufficient to support a diff --git a/docs/environment-variables.md b/docs/environment-variables.md index e52e529f4b6..84d6f078438 100644 --- a/docs/environment-variables.md +++ b/docs/environment-variables.md @@ -69,7 +69,7 @@ those. ## IPFS - `GRAPH_IPFS_TIMEOUT`: timeout for IPFS, which includes requests for manifest files - and from mappings (in seconds, default is 30). + and from mappings (in seconds, default is 60). - `GRAPH_MAX_IPFS_FILE_BYTES`: maximum size for a file that can be retrieved (in bytes, default is 256 MiB). - `GRAPH_MAX_IPFS_MAP_FILE_SIZE`: maximum size of files that can be processed with `ipfs.map`. When a file is processed through `ipfs.map`, the entities @@ -78,7 +78,8 @@ those. may use (in bytes, defaults to 256MB). - `GRAPH_MAX_IPFS_CACHE_SIZE`: maximum number of files cached (defaults to 50). - `GRAPH_MAX_IPFS_CACHE_FILE_SIZE`: maximum size of each cached file (in bytes, defaults to 1MiB). -- `GRAPH_MAX_IPFS_CONCURRENT_REQUESTS`: maximum concurrent requests to IPFS from file data sources (defaults to 100). +- `GRAPH_IPFS_REQUEST_LIMIT`: Limits both concurrent and per-second requests to IPFS for file data + sources. Defaults to 100. ## GraphQL @@ -122,6 +123,17 @@ those. - `SILENT_GRAPHQL_VALIDATIONS`: If `ENABLE_GRAPHQL_VALIDATIONS` is enabled, you are also able to just silently print the GraphQL validation errors, without failing the actual query. Note: queries might still fail as part of the later stage validations running, during GraphQL engine execution. +- `GRAPH_GRAPHQL_DISABLE_BOOL_FILTERS`: disables the ability to use AND/OR + filters. This is useful if we want to disable filters for + performance reasons. +- `GRAPH_GRAPHQL_DISABLE_CHILD_SORTING`: disables the ability to use child-based + sorting. This is useful if we want to disable child-based sorting for + performance reasons. +- `GRAPH_GRAPHQL_TRACE_TOKEN`: the token to use to enable query tracing for + a GraphQL request. If this is set, requests that have a header + `X-GraphTraceQuery` set to this value will include a trace of the SQL + queries that were run. Defaults to the empty string which disables + tracing. ### GraphQL caching @@ -199,3 +211,14 @@ those. identified as unused, `graph-node` will wait at least this long before actually deleting the data (value is in minutes, defaults to 360, i.e. 6 hours) +- `GRAPH_ALLOW_NON_DETERMINISTIC_IPFS`: enables indexing of subgraphs which + use `ipfs.cat` as part of subgraph mappings. **This is an experimental + feature which is not deterministic, and will be removed in the future**. +- `GRAPH_STORE_BATCH_TARGET_DURATION`: How long batch operations during + copying or grafting should take. This limits how long transactions for + such long running operations will be, and therefore helps control bloat + in other tables. Value is in seconds and defaults to 180s. +- `GRAPH_START_BLOCK`: the block hash:block number at which the forked subgraph will start indexing. +- `GRAPH_FORK_BASE`: the API URL that the graph node will fork from; use `https://api.thegraph.com/subgraphs/id/` + for the hosted service. +- `GRAPH_DEBUG_FORK`: the IPFS hash id of the subgraph to fork. diff --git a/docs/getting-started.md b/docs/getting-started.md index 213a54073ae..e7ea53a7ca1 100644 --- a/docs/getting-started.md +++ b/docs/getting-started.md @@ -171,7 +171,7 @@ type Token @entity { This `entity` tracks a single ERC721 token on Ethereum by its ID and the current owner.
The **`ID` field is required** and stores values of the ID type, which are strings. The `ID` must be a unique value so that it can be placed into the store. For an ERC721 token, the unique ID could be the token ID because that value is unique to that coin. -The exclamation mark represents the fact that that field must be set when the entity is stored in the database, i.e., it cannot be `null`. See the [Schema API](graphql-api.md#3-schema) for a complete reference on defining the schema for The Graph. +The exclamation mark represents the fact that that field must be set when the entity is stored in the database, i.e., it cannot be `null`. See the [Schema API](https://github.com/graphprotocol/docs/blob/main/pages/en/querying/graphql-api.mdx#schema) for a complete reference on defining the schema for The Graph. When you complete the schema, add its path to the top-level `schema` key in the subgraph manifest. See the code below for an example: @@ -417,7 +417,7 @@ Depending on how many events have been emitted by your smart contracts, it could ## 3 Query the Local Graph Node With the subgraph deployed to the locally running Graph Node, visit http://127.0.0.1:8000/ to open up a [GraphiQL](https://github.com/graphql/graphiql) interface where you can explore the deployed GraphQL API for the subgraph by issuing queries and viewing the schema. -We provide a few simple examples below, but please see the [Query API](graphql-api.md#1-queries) for a complete reference on how to query the subgraph's entities. +We provide a few simple examples below, but please see the [Query API](https://github.com/graphprotocol/docs/blob/main/pages/en/querying/graphql-api.mdx#queries) for a complete reference on how to query the subgraph's entities. Query the `Token` entities: ```graphql diff --git a/docs/graphman.md b/docs/graphman.md new file mode 100644 index 00000000000..0964efc6051 --- /dev/null +++ b/docs/graphman.md @@ -0,0 +1,416 @@ +## Graphman Commands + +- [Info](#info) +- [Remove](#remove) +- [Unassign](#unassign) +- [Unused Record](#unused-record) +- [Unused Remove](#unused-remove) +- [Drop](#drop) +- [Chain Check Blocks](#check-blocks) +- [Chain Call Cache Remove](#chain-call-cache-remove) + + +# ⌘ Info + +### SYNOPSIS + + Prints the details of a deployment + + The deployment can be specified as either a subgraph name, an IPFS hash `Qm..`, or the database + namespace `sgdNNN`. Since the same IPFS hash can be deployed in multiple shards, it is possible to + specify the shard by adding `:shard` to the IPFS hash. + + USAGE: + graphman --config info [OPTIONS] + + ARGS: + + The deployment (see above) + + OPTIONS: + -c, --current + List only current version + + -h, --help + Print help information + + -p, --pending + List only pending versions + + -s, --status + Include status information + + -u, --used + List only used (current and pending) versions + +### DESCRIPTION + +The `info` command fetches details for a given deployment from the database. + +By default, it shows the following attributes for the deployment: + +- **name** +- **status** *(`pending` or `current`)* +- **id** *(the `Qm...` identifier for the deployment's subgraph)* +- **namespace** *(The database schema which contains that deployment's data tables)* +- **shard** +- **active** *(If there are multiple entries for the same subgraph, only one of them will be active.
That's the one we use for querying)* +- **chain** +- **graph node id** + +### OPTIONS + +If the `--status` option is enabled, extra attributes are also returned: + +- **synced** *(Whether or not the subgraph has synced all the way to the current chain head)* +- **health** *(Can be either `healthy`, `unhealthy` (syncing with errors) or `failed`)* +- **latest indexed block** +- **current chain head block** + +### EXAMPLES + +Describe a deployment by its name: + + graphman --config config.toml info subgraph-name + +Describe a deployment by its hash: + + graphman --config config.toml info QmfWRZCjT8pri4Amey3e3mb2Bga75Vuh2fPYyNVnmPYL66 + +Describe a deployment with extra info: + + graphman --config config.toml info QmfWRZCjT8pri4Amey3e3mb2Bga75Vuh2fPYyNVnmPYL66 --status + + +# ⌘ Remove + +### SYNOPSIS + + Remove a named subgraph + + USAGE: + graphman --config remove + + ARGS: + The name of the subgraph to remove + + OPTIONS: + -h, --help Print help information + +### DESCRIPTION + +Removes the association between a subgraph name and a deployment. + +No indexed data is lost as a result of this command. + +It is used mostly for stopping query traffic based on the subgraph's name, and to release that name for +another deployment to use. + +### EXAMPLES + +Remove a named subgraph: + + graphman --config config.toml remove subgraph-name + + +# ⌘ Unassign + +#### SYNOPSIS + + Unassign a deployment + + USAGE: + graphman --config unassign + + ARGS: + The deployment (see `help info`) + + OPTIONS: + -h, --help Print help information + +#### DESCRIPTION + +Makes `graph-node` stop indexing a deployment permanently. + +No indexed data is lost as a result of this command. + +Refer to the [Maintenance Documentation](https://github.com/graphprotocol/graph-node/blob/master/docs/maintenance.md#modifying-assignments) for more details about how Graph Node manages its deployment +assignments. + +#### EXAMPLES + +Unassign a deployment by its name: + + graphman --config config.toml unassign subgraph-name + +Unassign a deployment by its hash: + + graphman --config config.toml unassign QmfWRZCjT8pri4Amey3e3mb2Bga75Vuh2fPYyNVnmPYL66 + + +# ⌘ Unused Record + +### SYNOPSIS + + graphman-unused-record + Update and record currently unused deployments + + USAGE: + graphman unused record + + OPTIONS: + -h, --help Print help information + + +### DESCRIPTION + +Inspects every shard for unused deployments and registers them in the `unused_deployments` table in the +primary shard. + +No indexed data is lost as a result of this command. + +This sub-command is used as a previous step towards removing all data from unused subgraphs, followed by +`graphman unused remove`. + +A deployment is unused if it fulfills all of these criteria: + +1. It is not assigned to a node. +2. It is either not marked as active or is neither the current nor the pending version of a subgraph. +3. It is not the source of a currently running copy operation. + +### EXAMPLES + +To record all unused deployments: + + graphman --config config.toml unused record + + +# ⌘ Unused Remove + +### SYNOPSIS + + Remove deployments that were marked as unused with `record`.
+ + Deployments are removed in descending order of number of entities, i.e., smaller deployments are + removed before larger ones + + USAGE: + graphman unused remove [OPTIONS] + + OPTIONS: + -c, --count + How many unused deployments to remove (default: all) + + -d, --deployment + Remove a specific deployment + + -h, --help + Print help information + + -o, --older + Remove unused deployments that were recorded at least this many minutes ago + +### DESCRIPTION + +Removes from the database all indexed data from deployments previously marked as unused by the `graphman unused +record` command. + +This operation is irreversible. + +### EXAMPLES + +Remove all unused deployments + + graphman --config config.toml unused remove + +Remove all unused deployments older than 12 hours (720 minutes) + + graphman --config config.toml unused remove --older 720 + +Remove a specific unused deployment + + graphman --config config.toml unused remove --deployment QmfWRZCjT8pri4Amey3e3mb2Bga75Vuh2fPYyNVnmPYL66 + + +# ⌘ Drop + +### SYNOPSIS + + Delete a deployment and all its indexed data + + The deployment can be specified as either a subgraph name, an IPFS hash `Qm..`, or the database + namespace `sgdNNN`. Since the same IPFS hash can be deployed in multiple shards, it is possible to + specify the shard by adding `:shard` to the IPFS hash. + + USAGE: + graphman --config drop [OPTIONS] + + ARGS: + + The deployment identifier + + OPTIONS: + -c, --current + Search only for current versions + + -f, --force + Skip confirmation prompt + + -h, --help + Print help information + + -p, --pending + Search only for pending versions + + -u, --used + Search only for used (current and pending) versions + +### DESCRIPTION + +Stops, unassigns and removes all data from deployments matching the search term. + +This operation is irreversible. + +This command is a combination of other graphman commands applied in sequence: + +1. `graphman info ` +2. `graphman unassign ` +3. `graphman remove ` +4. `graphman unused record` +5. `graphman unused remove ` + +### EXAMPLES + +Stop, unassign and delete all indexed data from a specific deployment by its deployment id + + graphman --config config.toml drop QmfWRZCjT8pri4Amey3e3mb2Bga75Vuh2fPYyNVnmPYL66 + + +Stop, unassign and delete all indexed data from a specific deployment by its subgraph name + + graphman --config config.toml drop autor/subgraph-name + + +# ⌘ Check Blocks + +### SYNOPSIS + + Compares cached blocks with fresh ones and clears the block cache when they differ + + USAGE: + graphman --config chain check-blocks + + FLAGS: + -h, --help Prints help information + -V, --version Prints version information + + ARGS: + Chain name (must be an existing chain, see 'chain list') + + SUBCOMMANDS: + by-hash The hash of the target block + by-number The number of the target block + by-range A block number range, inclusive on both ends + +### DESCRIPTION + +The `check-blocks` command compares cached blocks with blocks from a JSON RPC provider and removes any blocks +from the cache that differ from the ones retrieved from the provider. + +Sometimes JSON RPC providers send invalid block data to Graph Node. The `graphman chain check-blocks` command +is useful to diagnose the integrity of cached blocks and eventually fix them. + +### OPTIONS + +Blocks can be selected by different methods.
The `check-blocks` command lets you use the block hash, a single +number or a number range to refer to which blocks it should verify: + +#### `by-hash` + + graphman --config chain check-blocks by-hash + +#### `by-number` + + graphman --config chain check-blocks by-number [--delete-duplicates] + +#### `by-range` + + graphman --config chain check-blocks by-range [-f|--from ] [-t|--to ] [--delete-duplicates] + +The `by-range` method lets you scan for numeric block ranges and offers the `--from` and `--to` options for +you to define the search bounds. If one of those options is omitted, `graphman` will consider an open bound +and will scan all blocks up to or after that number. + +Over time, it can happen that a JSON RPC provider offers different blocks for the same block number. In those +cases, `graphman` will not decide which block hash is the correct one and will abort the operation. Because of +this, the `by-number` and `by-range` methods also provide a `--delete-duplicates` flag, which instructs +`graphman` to delete all duplicated blocks for the given number and resume its operation. + +### EXAMPLES + +Inspect a single Ethereum Mainnet block by hash: + + graphman --config config.toml chain check-blocks mainnet by-hash 0xd56a9f64c7e696cfeb337791a7f4a9e81841aaf4fcad69f9bf2b2e50ad72b972 + +Inspect a block using its number: + + graphman --config config.toml chain check-blocks mainnet by-number 15626962 + +Inspect a block range, deleting any duplicated blocks: + + graphman --config config.toml chain check-blocks mainnet by-range --from 15626900 --to 15626962 --delete-duplicates + +Inspect all blocks after block `13000000`: + + graphman --config config.toml chain check-blocks mainnet by-range --from 13000000 + + +# ⌘ Chain Call Cache Remove + +### SYNOPSIS + +Remove the call cache of the specified chain. + +If block numbers are not mentioned in `--from` and `--to`, then the entire call cache will be removed. + +USAGE: + graphman chain call-cache remove [OPTIONS] + +OPTIONS: + -f, --from + Starting block number + + -h, --help + Print help information + + -t, --to + Ending block number + +### DESCRIPTION + +Remove the call cache of a specified chain. + +### OPTIONS + +The `from` and `to` options are used to decide the block range of the call cache that needs to be removed. + +#### `from` + +The `from` option is used to specify the starting block number of the block range. In the absence of the `from` option, +the first block number will be used as the starting block number. + +#### `to` + +The `to` option is used to specify the ending block number of the block range. In the absence of the `to` option, +the last block number will be used as the ending block number. + +### EXAMPLES + +Remove the call cache for all blocks numbered from 10 to 20: + + graphman --config config.toml chain call-cache ethereum remove --from 10 --to 20 + +Remove the entire call cache of the specified chain: + + graphman --config config.toml chain call-cache ethereum remove + diff --git a/docs/implementation/metadata.md b/docs/implementation/metadata.md index 3183b516ad2..f58d3759c40 100644 --- a/docs/implementation/metadata.md +++ b/docs/implementation/metadata.md @@ -65,8 +65,7 @@ static data.
| `deployment` | `text!` | IPFS hash | | `failed` | `boolean!` | | | `synced` | `boolean!` | | -| `earliest_ethereum_block_hash` | `bytea` | start block from manifest (to be removed) | -| `earliest_ethereum_block_number` | `numeric` | | +| `earliest_block_number` | `integer!` | earliest block for which we have data | | `latest_ethereum_block_hash` | `bytea` | current subgraph head | | `latest_ethereum_block_number` | `numeric` | | | `entity_count` | `numeric!` | total number of entities | diff --git a/docs/subgraph-manifest.md b/docs/subgraph-manifest.md index d1ba64dcea4..14b47b059dc 100644 --- a/docs/subgraph-manifest.md +++ b/docs/subgraph-manifest.md @@ -34,7 +34,7 @@ Any data format that has a well-defined 1:1 mapping with the [IPLD Canonical For | --- | --- | --- | | **kind** | *String | The type of data source. Possible values: *ethereum/contract*.| | **name** | *String* | The name of the source data. Will be used to generate APIs in the mapping and also for self-documentation purposes. | -| **network** | *String* | For blockchains, this describes which network the subgraph targets. For Ethereum, this can be any of "mainnet", "rinkeby", "kovan", "ropsten", "goerli", "poa-core", "poa-sokol", "xdai", "matic", "mumbai", "fantom", "bsc" or "clover". Developers could look for an up to date list in the graph-cli [*code*](https://github.com/graphprotocol/graph-cli/blob/master/src/commands/init.js#L43-L57).| +| **network** | *String* | For blockchains, this describes which network the subgraph targets. For Ethereum, this can be any of "mainnet", "rinkeby", "kovan", "ropsten", "goerli", "poa-core", "poa-sokol", "xdai", "matic", "mumbai", "fantom", "bsc" or "clover". Developers could look for an up to date list in the graph-cli [*code*](https://github.com/graphprotocol/graph-cli/blob/main/packages/cli/src/protocols/index.js#L70-L107).| | **source** | [*EthereumContractSource*](#151-ethereumcontractsource) | The source data on a blockchain such as Ethereum. | | **mapping** | [*Mapping*](#152-mapping) | The transformation logic applied to the data prior to being indexed. 
| diff --git a/graph/Cargo.toml b/graph/Cargo.toml index 89a82fa1ad4..fc1b5974c44 100644 --- a/graph/Cargo.toml +++ b/graph/Cargo.toml @@ -1,19 +1,19 @@ [package] name = "graph" -version = "0.27.0" -edition = "2021" +version.workspace = true +edition.workspace = true [dependencies] anyhow = "1.0" async-trait = "0.1.50" async-stream = "0.3" -atomic_refcell = "0.1.8" +atomic_refcell = "0.1.9" bigdecimal = { version = "0.1.0", features = ["serde"] } bytes = "1.0.1" -cid = "0.8.3" +cid = "0.10.1" diesel = { version = "1.4.8", features = ["postgres", "serde_json", "numeric", "r2d2", "chrono"] } diesel_derives = "1.4" -chrono = "0.4.22" +chrono = "0.4.23" envconfig = "0.10.0" Inflector = "0.11.3" isatty = "0.1.9" @@ -25,17 +25,18 @@ futures = "0.1.21" graphql-parser = "0.4.0" lazy_static = "1.4.0" num-bigint = { version = "^0.2.6", features = ["serde"] } -num_cpus = "1.13.1" +num_cpus = "1.15.0" num-traits = "0.2.15" rand = "0.8.4" -semver = { version = "1.0.12", features = ["serde"] } +regex = "1.5.4" +semver = { version = "1.0.16", features = ["serde"] } serde = { version = "1.0.126", features = ["rc"] } serde_derive = "1.0.125" serde_json = { version = "1.0", features = ["arbitrary_precision"] } serde_yaml = "0.8" slog = { version = "2.7.0", features = ["release_max_level_trace", "max_level_trace"] } stable-hash_legacy = { version = "0.3.3", package = "stable-hash" } -stable-hash = { version = "0.4.2"} +stable-hash = { version = "0.4.2" } strum = "0.21.0" strum_macros = "0.21.1" slog-async = "2.5.0" @@ -44,30 +45,30 @@ slog-term = "2.7.0" petgraph = "0.6.2" tiny-keccak = "1.5.0" tokio = { version = "1.16.1", features = ["time", "sync", "macros", "test-util", "rt-multi-thread", "parking_lot"] } -tokio-stream = { version = "0.1.9", features = ["sync"] } +tokio-stream = { version = "0.1.11", features = ["sync"] } tokio-retry = "0.3.0" -url = "2.2.1" -prometheus = "0.13.1" +url = "2.3.1" +prometheus = "0.13.3" priority-queue = "0.7.0" -tonic = { version = "0.7.1", features = ["tls-roots","compression"] } -prost = "0.10.4" -prost-types = "0.10.1" +tonic = { workspace = true } +prost = { workspace = true } +prost-types = { workspace = true } futures03 = { version = "0.3.1", package = "futures", features = ["compat"] } wasmparser = "0.78.2" thiserror = "1.0.25" parking_lot = "0.12.1" -itertools = "0.10.3" +itertools = "0.10.5" # Our fork contains patches to make some fields optional for Celo and Fantom compatibility. # Without the "arbitrary_precision" feature, we get the error `data did not match any variant of untagged enum Response`. 
web3 = { git = "https://github.com/graphprotocol/rust-web3", branch = "graph-patches-onto-0.18", features = ["arbitrary_precision"] } -serde_plain = "1.0.0" +serde_plain = "1.0.1" [dev-dependencies] test-store = { path = "../store/test-store" } +clap = { version = "3.2.23", features = ["derive", "env"] } maplit = "1.0.2" -structopt = { version = "0.3" } [build-dependencies] -tonic-build = { version = "0.7.2", features = ["prost","compression"] } +tonic-build = { workspace = true } diff --git a/graph/build.rs b/graph/build.rs index 67e99207ea1..14399c784c1 100644 --- a/graph/build.rs +++ b/graph/build.rs @@ -14,6 +14,7 @@ fn main() { .expect("Failed to compile Firehose proto(s)"); tonic_build::configure() + .protoc_arg("--experimental_allow_proto3_optional") .out_dir("src/substreams") .compile(&["proto/substreams.proto"], &["proto"]) .expect("Failed to compile Substreams proto(s)"); diff --git a/graph/examples/stress.rs b/graph/examples/stress.rs index 9414bd616db..0475437cecc 100644 --- a/graph/examples/stress.rs +++ b/graph/examples/stress.rs @@ -5,12 +5,12 @@ use std::sync::atomic::{AtomicUsize, Ordering::SeqCst}; use std::sync::Arc; use std::time::{Duration, Instant}; +use clap::Parser; use graph::data::value::{Object, Word}; use graph::object; use graph::prelude::{lazy_static, q, r, BigDecimal, BigInt, QueryResult}; use rand::SeedableRng; use rand::{rngs::SmallRng, Rng}; -use structopt::StructOpt; use graph::util::cache_weight::CacheWeight; use graph::util::lfu_cache::LfuCache; @@ -523,32 +523,32 @@ impl From for Entry { } // Command line arguments -#[derive(StructOpt)] -#[structopt(name = "stress", about = "Stress test for the LFU Cache")] +#[derive(Parser)] +#[clap(name = "stress", about = "Stress test for the LFU Cache")] struct Opt { /// Number of cache evictions and insertions - #[structopt(short, long, default_value = "1000")] + #[clap(short, long, default_value = "1000")] niter: usize, /// Print this many intermediate messages - #[structopt(short, long, default_value = "10")] + #[clap(short, long, default_value = "10")] print_count: usize, /// Use objects of size 0 up to this size, chosen unifromly randomly /// unless `--fixed` is given - #[structopt(short, long, default_value = "1024")] + #[clap(short, long, default_value = "1024")] obj_size: usize, - #[structopt(short, long, default_value = "1000000")] + #[clap(short, long, default_value = "1000000")] cache_size: usize, - #[structopt(short, long, default_value = "vec")] + #[clap(short, long, default_value = "vec")] template: String, - #[structopt(short, long)] + #[clap(short, long)] samples: bool, /// Always use objects of size `--obj-size` - #[structopt(short, long)] + #[clap(short, long)] fixed: bool, /// The seed of the random number generator. 
A seed of 0 means that all /// samples are taken from the same template object, and only differ in /// size - #[structopt(long)] + #[clap(long)] seed: Option, } diff --git a/graph/proto/ethereum/transforms.proto b/graph/proto/ethereum/transforms.proto index 57a3e0cb861..3b47c319630 100644 --- a/graph/proto/ethereum/transforms.proto +++ b/graph/proto/ethereum/transforms.proto @@ -1,12 +1,33 @@ syntax = "proto3"; package sf.ethereum.transform.v1; -option go_package = "github.com/streamingfast/sf-ethereum/types/pb/sf/ethereum/transform/v1;pbtransform"; +option go_package = "github.com/streamingfast/firehose-ethereum/types/pb/sf/ethereum/transform/v1;pbtransform"; -// Log and CallTo Filters, applied as 'inclusive OR' +// CombinedFilter is a combination of "LogFilters" and "CallToFilters" +// +// It transforms the requested stream in two ways: +// 1. STRIPPING +// The block data is stripped from all transactions that don't +// match any of the filters. +// +// 2. SKIPPING +// If an "block index" covers a range containing a +// block that does NOT match any of the filters, the block will be +// skipped altogether, UNLESS send_all_block_headers is enabled +// In that case, the block would still be sent, but without any +// transactionTrace +// +// The SKIPPING feature only applies to historical blocks, because +// the "block index" is always produced after the merged-blocks files +// are produced. Therefore, the "live" blocks are never filtered out. +// message CombinedFilter { repeated LogFilter log_filters = 1; repeated CallToFilter call_filters = 2; + + // Always send all blocks. if they don't match any log_filters or call_filters, + // all the transactions will be filtered out, sending only the header. + bool send_all_block_headers = 3; } // MultiLogFilter concatenates the results of each LogFilter (inclusive OR) @@ -39,5 +60,26 @@ message CallToFilter { repeated bytes signatures = 2; } +// Deprecated: LightBlock is deprecated, replaced by HeaderOnly, note however that the new transform +// does not have any transactions traces returned, so it's not a direct replacement. message LightBlock { } + +// HeaderOnly returns only the block's header and few top-level core information for the block. Useful +// for cases where no transactions information is required at all. +// +// The structure that would will have access to after: +// +// ```ignore +// Block { +// int32 ver = 1; +// bytes hash = 2; +// uint64 number = 3; +// uint64 size = 4; +// BlockHeader header = 5; +// } +// ``` +// +// Everything else will be empty. 
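An aside on the structopt → clap migration shown earlier in `graph/examples/stress.rs`: clap 3's derive API is attribute-for-attribute close to structopt (`#[structopt(...)]` becomes `#[clap(...)]`, `Opt::from_args()` becomes `Opt::parse()`). A trimmed sketch of the pattern, keeping only two of the real flags and assuming `u64` for the seed type the diff elides:

```rust
use clap::Parser;

/// Subset of the real `Opt` from the stress example, for illustration only.
#[derive(Parser)]
#[clap(name = "stress", about = "Stress test for the LFU Cache")]
struct Opt {
    /// Number of cache evictions and insertions
    #[clap(short, long, default_value = "1000")]
    niter: usize,

    /// The seed of the random number generator (type assumed here)
    #[clap(long)]
    seed: Option<u64>,
}

fn main() {
    // With structopt this was `Opt::from_args()`; clap's derive API uses `parse()`.
    let opt = Opt::parse();
    println!("niter={} seed={:?}", opt.niter, opt.seed);
}
```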
+message HeaderOnly { +} diff --git a/graph/proto/firehose.proto b/graph/proto/firehose.proto index b8060280de3..a4101a83e18 100644 --- a/graph/proto/firehose.proto +++ b/graph/proto/firehose.proto @@ -1,17 +1,52 @@ syntax = "proto3"; -package sf.firehose.v1; +package sf.firehose.v2; import "google/protobuf/any.proto"; -option go_package = "github.com/streamingfast/pbgo/sf/firehose/v1;pbfirehose"; +option go_package = "github.com/streamingfast/pbgo/sf/firehose/v2;pbfirehose"; service Stream { rpc Blocks(Request) returns (stream Response); } -// For historical segments, forks are not passed +service Fetch { + rpc Block(SingleBlockRequest) returns (SingleBlockResponse); +} + +message SingleBlockRequest { + + // Get the current known canonical version of a block at with this number + message BlockNumber{ + uint64 num=1; + } + + // Get the current block with specific hash and number + message BlockHashAndNumber{ + uint64 num=1; + string hash=2; + } + + // Get the block that generated a specific cursor + message Cursor{ + string cursor=1; + } + + oneof reference{ + BlockNumber block_number=3; + BlockHashAndNumber block_hash_and_number=4; + Cursor cursor=5; + } + + repeated google.protobuf.Any transforms = 6; +} + +message SingleBlockResponse { + google.protobuf.Any block = 1; +} + message Request { + // Controls where the stream of blocks will start. // // The stream will start **inclusively** at the requested block num. @@ -25,86 +60,51 @@ message Request { // #50. If it resolves before first streamable block of chain, we assume start // of chain. // - // If `start_cursor` is passed, this value is ignored and the stream instead starts + // If `start_cursor` is given, this value is ignored and the stream instead starts // immediately after the Block pointed by the opaque `start_cursor` value. int64 start_block_num = 1; // Controls where the stream of blocks will start which will be immediately after // the Block pointed by this opaque cursor. // - // Obtain this value from a previously received from `Response.cursor`. + // Obtain this value from a previously received `Response.cursor`. // // This value takes precedence over `start_block_num`. - string start_cursor = 13; + string cursor = 2; // When non-zero, controls where the stream of blocks will stop. // // The stream will close **after** that block has passed so the boundary is // **inclusive**. - uint64 stop_block_num = 5; - - // Filter the steps you want to see. If not specified, defaults to all steps. - // - // Most common steps will be [STEP_IRREVERSIBLE], or [STEP_NEW, STEP_UNDO, STEP_IRREVERSIBLE]. - repeated ForkStep fork_steps = 8; - - // The CEL filter expression used to include transactions, specific to the target protocol, - // works in combination with `exclude_filter_expr` value. - string include_filter_expr = 10; - - // The CEL filter expression used to exclude transactions, specific to the target protocol, works - // in combination with `include_filter_expr` value. - string exclude_filter_expr = 11; - - // **Warning** Experimental API, controls how blocks are trimmed for extraneous information before - // being sent back. The actual trimming is chain dependent. - //BlockDetails details = 15; - reserved 15; + uint64 stop_block_num = 3; - // controls how many confirmations will consider a given block as final (STEP_IRREVERSIBLE). 
Warning, if any reorg goes beyond that number of confirmations, the request will stall forever - //uint64 confirmations = 16; - reserved 16; + // With final_block_only, you only receive blocks with STEP_FINAL + // Default behavior will send blocks as STEP_NEW, with occasional STEP_UNDO + bool final_blocks_only = 4; - - //- EOS "handoffs:3" - //- EOS "lib" - //- EOS "confirms:3" - //- ETH "confirms:200" - //- ETH "confirms:7" - //- SOL "commmitement:finalized" - //- SOL "confirms:200" - string irreversibility_condition = 17; - - repeated google.protobuf.Any transforms = 18; + repeated google.protobuf.Any transforms = 10; } message Response { - // Chain specific block payload, one of: - // - sf.eosio.codec.v1.Block - // - sf.ethereum.codec.v1.Block - // - sf.near.codec.v1.Block - // - sf.solana.codec.v1.Block + // Chain specific block payload, ex: + // - sf.eosio.type.v1.Block + // - sf.ethereum.type.v1.Block + // - sf.near.type.v1.Block google.protobuf.Any block = 1; ForkStep step = 6; string cursor = 10; } enum ForkStep { - STEP_UNKNOWN = 0; - // Block is new head block of the chain, that is linear with the previous block + STEP_UNSET = 0; + + // Incoming block STEP_NEW = 1; - // Block is now forked and should be undone, it's not the head block of the chain anymore + + // A reorg caused this specific block to be excluded from the chain STEP_UNDO = 2; - // Removed, was STEP_REDO - reserved 3; - // Block is now irreversible and can be committed to (finality is chain specific, see chain documentation for more details) - STEP_IRREVERSIBLE = 4; - // Removed, was STEP_STALLED - reserved 5 ; -} -// TODO: move to ethereum specific transforms -enum BlockDetails { - BLOCK_DETAILS_FULL = 0; - BLOCK_DETAILS_LIGHT = 1; -} \ No newline at end of file + // Block is now final and can be committed (finality is chain specific, + // see chain documentation for more details) + STEP_FINAL = 3; +} diff --git a/graph/proto/substreams.proto b/graph/proto/substreams.proto index 564d0cbf05f..e860e3bfdb1 100644 --- a/graph/proto/substreams.proto +++ b/graph/proto/substreams.proto @@ -1,16 +1,27 @@ +// File generated using this command at the root of `graph-node` project +// and assuming `substreams` repository is a sibling of `graph-node` (note that you +// might need to adjust the `head -nN` and `skip N` values in the commands below to skip +// more/less lines): +// +// ``` +// cat graph/proto/substreams.proto | head -n16 > /tmp/substreams.proto && mv /tmp/substreams.proto graph/proto/substreams.proto +// cat ../substreams/proto/sf/substreams/v1/substreams.proto | grep -Ev 'import *"sf/substreams' >> graph/proto/substreams.proto +// cat ../substreams/proto/sf/substreams/v1/modules.proto | skip 6 >> graph/proto/substreams.proto +// cat ../substreams/proto/sf/substreams/v1/package.proto | skip 9 >> graph/proto/substreams.proto +// cat ../substreams/proto/sf/substreams/v1/clock.proto | skip 7 >> graph/proto/substreams.proto +// # Manually add line `import "google/protobuf/descriptor.proto";` below `import "google/protobuf/timestamp.proto";` +// ``` +// +// FIXME: We copy over and inline most of the substreams files, this is bad and we need a better way to +// generate that, outside of doing this copying over. 
syntax = "proto3"; package sf.substreams.v1; - option go_package = "github.com/streamingfast/substreams/pb/sf/substreams/v1;pbsubstreams"; import "google/protobuf/any.proto"; -import "google/protobuf/descriptor.proto"; import "google/protobuf/timestamp.proto"; - -// FIXME: I copied over and inlined most of the substreams files, this is bad and we need a better way to -// generate that, outside of doing this copying over. We should check maybe `buf` or a pre-populated -// package. +import "google/protobuf/descriptor.proto"; service Stream { rpc Blocks(Request) returns (stream Response); @@ -23,6 +34,18 @@ message Request { repeated ForkStep fork_steps = 4; string irreversibility_condition = 5; + // By default, the engine runs in developer mode, with richer and deeper output, + // * support for multiple `output_modules`, of `store` and `map` kinds + // * support for `initial_store_snapshot_for_modules` + // * log outputs for output modules + // + // With `production_mode`, however, you trade off functionality for high speed, where it: + // * restricts the possible requested `output_modules` to a single mapper module, + // * turns off support for `initial_store_snapshot_for_modules`, + // * still streams output linearly, with a cursor, but at higher speeds + // * and purges log outputs from responses. + bool production_mode = 9; + Modules modules = 6; repeated string output_modules = 7; repeated string initial_store_snapshot_for_modules = 8; @@ -30,6 +53,7 @@ message Request { message Response { oneof message { + SessionInit session = 5; // Always sent first ModulesProgress progress = 1; // Progress of data preparation, before sending in the stream of `data` events. InitialSnapshotData snapshot_data = 2; InitialSnapshotComplete snapshot_complete = 3; @@ -51,6 +75,10 @@ enum ForkStep { reserved 5; } +message SessionInit { + string trace_id = 1; +} + message InitialSnapshotComplete { string cursor = 1; } @@ -71,17 +99,36 @@ message BlockScopedData { message ModuleOutput { string name = 1; + oneof data { google.protobuf.Any map_output = 2; - StoreDeltas store_deltas = 3; - } - repeated string logs = 4; + // StoreDeltas are produced for store modules in development mode. + // It is not possible to retrieve store models in production, with parallelization + // enabled. If you need the deltas directly, write a pass through mapper module + // that will get them down to you. + StoreDeltas debug_store_deltas = 3; + } + repeated string debug_logs = 4; // LogsTruncated is a flag that tells you if you received all the logs or if they // were truncated because you logged too much (fixed limit currently is set to 128 KiB). - bool logs_truncated = 5; + bool debug_logs_truncated = 5; + + bool cached = 6; } +// think about: +// message ModuleOutput { ... +// ModuleOutputDebug debug_info = 6; +// ...} +//message ModuleOutputDebug { +// StoreDeltas store_deltas = 3; +// repeated string logs = 4; +// // LogsTruncated is a flag that tells you if you received all the logs or if they +// // were truncated because you logged too much (fixed limit currently is set to 128 KiB). 
+// bool logs_truncated = 5; +//} + message ModulesProgress { repeated ModuleProgress modules = 1; } @@ -116,8 +163,8 @@ message ModuleProgress { } message BlockRange { - uint64 start_block = 1; - uint64 end_block = 2; + uint64 start_block = 2; + uint64 end_block = 3; } message StoreDeltas { @@ -144,7 +191,6 @@ message Output { google.protobuf.Timestamp timestamp = 4; google.protobuf.Any value = 10; } - message Modules { repeated Module modules = 1; repeated Binary binaries = 2; @@ -199,8 +245,9 @@ message Module { UPDATE_POLICY_MIN = 4; // Provides a store where you can `max_*()` keys, where two stores merge by leaving the maximum value. UPDATE_POLICY_MAX = 5; + // Provides a store where you can `append()` keys, where two stores merge by concatenating the bytes in order. + UPDATE_POLICY_APPEND = 6; } - } message Input { @@ -232,13 +279,6 @@ message Module { string type = 1; } } - -message Clock { - string id = 1; - uint64 number = 2; - google.protobuf.Timestamp timestamp = 3; -} - message Package { // Needs to be one so this file can be used _directly_ as a // buf `Image` andor a ProtoSet for grpcurl and other tools @@ -265,3 +305,8 @@ message ModuleMetadata { uint64 package_index = 1; string doc = 2; } +message Clock { + string id = 1; + uint64 number = 2; + google.protobuf.Timestamp timestamp = 3; +} diff --git a/graph/src/blockchain/block_stream.rs b/graph/src/blockchain/block_stream.rs index 801ba99c439..001fc5c7da7 100644 --- a/graph/src/blockchain/block_stream.rs +++ b/graph/src/blockchain/block_stream.rs @@ -10,7 +10,7 @@ use super::{Block, BlockPtr, Blockchain}; use crate::anyhow::Result; use crate::components::store::{BlockNumber, DeploymentLocator}; use crate::data::subgraph::UnifiedMappingApiVersion; -use crate::firehose; +use crate::firehose::{self, FirehoseEndpoint}; use crate::substreams::BlockScopedData; use crate::{prelude::*, prometheus::labels}; @@ -84,6 +84,20 @@ pub trait BlockStream: { } +/// BlockRefetcher abstraction allows a chain to decide if a block must be refetched after a dynamic data source was added +#[async_trait] +pub trait BlockRefetcher: Send + Sync { + // type Block: Block + Clone + Debug + Default; + fn required(&self, chain: &C) -> bool; + + async fn get_block( + &self, + chain: &C, + logger: &Logger, + cursor: FirehoseCursor, + ) -> Result; +} + /// BlockStreamBuilder is an abstraction that would separate the logic for building streams from the blockchain trait #[async_trait] pub trait BlockStreamBuilder: Send + Sync { @@ -123,7 +137,7 @@ impl FirehoseCursor { impl fmt::Display for FirehoseCursor { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { - f.write_str(&self.0.as_deref().unwrap_or("")) + f.write_str(self.0.as_deref().unwrap_or("")) } } @@ -131,7 +145,7 @@ impl From for FirehoseCursor { fn from(cursor: String) -> Self { // Treat a cursor of "" as None, not absolutely necessary for correctness since the firehose // treats both as the same, but makes it a little clearer. - if cursor == "" { + if cursor.is_empty() { FirehoseCursor::None } else { FirehoseCursor(Some(cursor)) @@ -173,9 +187,31 @@ where } impl BlockWithTriggers { - pub fn new(block: C::Block, mut trigger_data: Vec) -> Self { + /// Creates a BlockWithTriggers structure, which holds + /// the trigger data ordered and without any duplicates. + pub fn new(block: C::Block, mut trigger_data: Vec, logger: &Logger) -> Self { // This is where triggers get sorted. 
trigger_data.sort(); + + let old_len = trigger_data.len(); + + // This is removing the duplicate triggers in the case of multiple + // data sources fetching the same event/call/etc. + trigger_data.dedup(); + + let new_len = trigger_data.len(); + + if new_len != old_len { + debug!( + logger, + "Trigger data had duplicate triggers"; + "block_number" => block.number(), + "block_hash" => block.hash().hash_hex(), + "old_length" => old_len, + "new_length" => new_len, + ); + } + Self { block, trigger_data, @@ -254,6 +290,7 @@ pub trait FirehoseMapper: Send + Sync { async fn block_ptr_for_number( &self, logger: &Logger, + endpoint: &Arc, number: BlockNumber, ) -> Result; @@ -271,6 +308,7 @@ pub trait FirehoseMapper: Send + Sync { async fn final_block_ptr_for( &self, logger: &Logger, + endpoint: &Arc, block: &C::Block, ) -> Result; } @@ -299,6 +337,8 @@ pub enum FirehoseError { #[derive(Error, Debug)] pub enum SubstreamsError { + #[error("response is missing the clock information")] + MissingClockError, /// We were unable to decode the received block payload into the chain specific Block struct (e.g. chain_ethereum::pb::Block) #[error("received gRPC block payload cannot be decoded: {0}")] DecodingError(#[from] prost::DecodeError), @@ -308,10 +348,13 @@ pub enum SubstreamsError { UnknownError(#[from] anyhow::Error), #[error("multiple module output error")] - MultipleModuleOutputError(), + MultipleModuleOutputError, + + #[error("module output was not available (none) or wrong data provided")] + ModuleOutputNotPresentOrUnexpected, #[error("unexpected store delta output")] - UnexpectedStoreDeltaOutput(), + UnexpectedStoreDeltaOutput, } #[derive(Debug)] @@ -457,7 +500,7 @@ mod test { let mut count = 0; loop { match stream.next().await { - None if blocks.len() == 0 => panic!("None before blocks"), + None if blocks.is_empty() => panic!("None before blocks"), Some(Err(CancelableError::Cancel)) => { assert!(guard.is_canceled(), "Guard shouldn't be called yet"); diff --git a/graph/src/blockchain/empty_node_capabilities.rs b/graph/src/blockchain/empty_node_capabilities.rs new file mode 100644 index 00000000000..738d4561984 --- /dev/null +++ b/graph/src/blockchain/empty_node_capabilities.rs @@ -0,0 +1,46 @@ +use std::marker::PhantomData; + +use super::{Blockchain, NodeCapabilities}; + +/// A boring implementor of [`NodeCapabilities`] for blockchains that +/// only need an empty `struct`. 
+#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub struct EmptyNodeCapabilities(PhantomData); + +impl Default for EmptyNodeCapabilities { + fn default() -> Self { + EmptyNodeCapabilities(PhantomData) + } +} + +impl std::fmt::Display for EmptyNodeCapabilities +where + C: Blockchain, +{ + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", C::KIND) + } +} + +impl slog::Value for EmptyNodeCapabilities +where + C: Blockchain, +{ + fn serialize( + &self, + record: &slog::Record, + key: slog::Key, + serializer: &mut dyn slog::Serializer, + ) -> slog::Result { + slog::Value::serialize(&C::KIND.to_string(), record, key, serializer) + } +} + +impl NodeCapabilities for EmptyNodeCapabilities +where + C: Blockchain, +{ + fn from_data_sources(_data_sources: &[C::DataSource]) -> Self { + EmptyNodeCapabilities(PhantomData) + } +} diff --git a/graph/src/blockchain/firehose_block_ingestor.rs b/graph/src/blockchain/firehose_block_ingestor.rs index 965272c0d35..7be9126cc04 100644 --- a/graph/src/blockchain/firehose_block_ingestor.rs +++ b/graph/src/blockchain/firehose_block_ingestor.rs @@ -3,15 +3,35 @@ use std::{marker::PhantomData, sync::Arc, time::Duration}; use crate::{ blockchain::Block as BlockchainBlock, components::store::ChainStore, - firehose::{self, decode_firehose_block, FirehoseEndpoint}, + firehose::{self, decode_firehose_block, FirehoseEndpoint, HeaderOnly}, prelude::{error, info, Logger}, util::backoff::ExponentialBackoff, }; use anyhow::{Context, Error}; use futures03::StreamExt; +use prost::Message; +use prost_types::Any; use slog::trace; use tonic::Streaming; +const TRANSFORM_ETHEREUM_HEADER_ONLY: &str = + "type.googleapis.com/sf.ethereum.transform.v1.HeaderOnly"; + +pub enum Transforms { + EthereumHeaderOnly, +} + +impl Into for &Transforms { + fn into(self) -> Any { + match self { + Transforms::EthereumHeaderOnly => Any { + type_url: TRANSFORM_ETHEREUM_HEADER_ONLY.to_owned(), + value: HeaderOnly {}.encode_to_vec(), + }, + } + } +} + pub struct FirehoseBlockIngestor where M: prost::Message + BlockchainBlock + Default + 'static, @@ -19,6 +39,7 @@ where chain_store: Arc, endpoint: Arc, logger: Logger, + default_transforms: Vec, phantom: PhantomData, } @@ -37,12 +58,16 @@ where endpoint, logger, phantom: PhantomData {}, + default_transforms: vec![], } } - pub async fn run(self) { - use firehose::ForkStep::*; + pub fn with_transforms(mut self, transforms: Vec) -> Self { + self.default_transforms = transforms; + self + } + pub async fn run(self) { let mut latest_cursor = self.fetch_head_cursor().await; let mut backoff = ExponentialBackoff::new(Duration::from_millis(250), Duration::from_secs(30)); @@ -59,8 +84,9 @@ where .stream_blocks(firehose::Request { // Starts at current HEAD block of the chain (viewed from Firehose side) start_block_num: -1, - start_cursor: latest_cursor.clone(), - fork_steps: vec![StepNew as i32, StepUndo as i32], + cursor: latest_cursor.clone(), + final_blocks_only: false, + transforms: self.default_transforms.iter().map(|t| t.into()).collect(), ..Default::default() }) .await; @@ -73,7 +99,7 @@ where latest_cursor = self.process_blocks(latest_cursor, stream).await } Err(e) => { - error!(self.logger, "Unable to connect to endpoint: {:?}", e); + error!(self.logger, "Unable to connect to endpoint: {:#}", e); } } @@ -89,7 +115,7 @@ where match self.chain_store.clone().chain_head_cursor() { Ok(cursor) => return cursor.unwrap_or_else(|| "".to_string()), Err(e) => { - error!(self.logger, "Fetching chain head cursor failed: {:?}", e); + 
error!(self.logger, "Fetching chain head cursor failed: {:#}", e); backoff.sleep_async().await; } @@ -122,13 +148,13 @@ where trace!(self.logger, "Received undo block to ingest, skipping"); Ok(()) } - StepIrreversible | StepUnknown => panic!( + StepFinal | StepUnset => panic!( "We explicitly requested StepNew|StepUndo but received something else" ), }; if let Err(e) = result { - error!(self.logger, "Process block failed: {:?}", e); + error!(self.logger, "Process block failed: {:#}", e); break; } diff --git a/graph/src/blockchain/firehose_block_stream.rs b/graph/src/blockchain/firehose_block_stream.rs index bcf4c85ea04..cc60acdb38a 100644 --- a/graph/src/blockchain/firehose_block_stream.rs +++ b/graph/src/blockchain/firehose_block_stream.rs @@ -2,7 +2,6 @@ use super::block_stream::{BlockStream, BlockStreamEvent, FirehoseMapper}; use super::{Blockchain, TriggersAdapter}; use crate::blockchain::block_stream::FirehoseCursor; use crate::blockchain::TriggerFilter; -use crate::firehose::ForkStep::*; use crate::prelude::*; use crate::util::backoff::ExponentialBackoff; use crate::{firehose, firehose::FirehoseEndpoint}; @@ -231,8 +230,8 @@ fn stream_blocks>( let mut request = firehose::Request { start_block_num: start_block_num as i64, - start_cursor: latest_cursor.to_string(), - fork_steps: vec![StepNew as i32, StepUndo as i32], + cursor: latest_cursor.to_string(), + final_blocks_only: false, ..Default::default() }; @@ -256,6 +255,7 @@ fn stream_blocks>( for await response in stream { match process_firehose_response( + &endpoint, response, &mut check_subgraph_continuity, manifest_start_block_num, @@ -344,6 +344,7 @@ enum BlockResponse { } async fn process_firehose_response>( + endpoint: &Arc, result: Result, check_subgraph_continuity: &mut bool, manifest_start_block_num: BlockNumber, @@ -374,7 +375,7 @@ async fn process_firehose_response>( ); let mut revert_to = mapper - .final_block_ptr_for(logger, &block.block) + .final_block_ptr_for(logger, endpoint, &block.block) .await .context("Could not fetch final block to revert to")?; @@ -389,7 +390,7 @@ async fn process_firehose_response>( } revert_to = mapper - .block_ptr_for_number(logger, block_num) + .block_ptr_for_number(logger, endpoint, block_num) .await .context("Could not fetch manifest start block to revert to")?; } diff --git a/graph/src/blockchain/mock.rs b/graph/src/blockchain/mock.rs index ba638d82e71..42b779d10d4 100644 --- a/graph/src/blockchain/mock.rs +++ b/graph/src/blockchain/mock.rs @@ -37,14 +37,9 @@ impl Block for MockBlock { } } +#[derive(Clone)] pub struct MockDataSource; -impl Clone for MockDataSource { - fn clone(&self) -> Self { - todo!() - } -} - impl TryFrom> for MockDataSource { type Error = Error; @@ -54,6 +49,10 @@ impl TryFrom> for MockDataSource { } impl DataSource for MockDataSource { + fn from_template_info(_template_info: DataSourceTemplateInfo) -> Result { + todo!() + } + fn address(&self) -> Option<&[u8]> { todo!() } @@ -322,6 +321,18 @@ impl Blockchain for MockBlockchain { todo!() } + fn is_refetch_block_required(&self) -> bool { + false + } + + async fn refetch_firehose_block( + &self, + _logger: &slog::Logger, + _cursor: FirehoseCursor, + ) -> Result { + todo!() + } + fn chain_store(&self) -> std::sync::Arc { todo!() } diff --git a/graph/src/blockchain/mod.rs b/graph/src/blockchain/mod.rs index 88df6170843..81c495afad9 100644 --- a/graph/src/blockchain/mod.rs +++ b/graph/src/blockchain/mod.rs @@ -3,6 +3,7 @@ //! trait which is the centerpiece of this module. 
pub mod block_stream; +mod empty_node_capabilities; pub mod firehose_block_ingestor; pub mod firehose_block_stream; pub mod mock; @@ -34,7 +35,6 @@ use slog::Logger; use std::{ any::Any, collections::HashMap, - convert::TryFrom, fmt::{self, Debug}, str::FromStr, sync::Arc, @@ -42,6 +42,7 @@ use std::{ use web3::types::H256; pub use block_stream::{ChainHeadUpdateListener, ChainHeadUpdateStream, TriggersAdapter}; +pub use empty_node_capabilities::EmptyNodeCapabilities; pub use types::{BlockHash, BlockPtr, ChainIdentifier}; use self::block_stream::{BlockStream, FirehoseCursor}; @@ -72,11 +73,55 @@ pub trait Block: Send + Sync { } /// The data that should be stored for this block in the `ChainStore` + /// TODO: Return ChainStoreData once it is available for all chains fn data(&self) -> Result { Ok(serde_json::Value::Null) } } +#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +/// This is the root data for the chain store. This stucture provides backwards +/// compatibility with existing data for ethereum. +pub struct ChainStoreData { + pub block: ChainStoreBlock, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +/// ChainStoreBlock is intended to standardize the information stored in the data +/// field of the ChainStore. All the chains should eventually return this type +/// on the data() implementation for block. This will ensure that any part of the +/// structured data can be relied upon for all chains. +pub struct ChainStoreBlock { + /// Unix timestamp (seconds since epoch), can be stored as hex or decimal. + timestamp: String, + data: serde_json::Value, +} + +impl ChainStoreBlock { + pub fn new(unix_timestamp: i64, data: serde_json::Value) -> Self { + Self { + timestamp: unix_timestamp.to_string(), + data, + } + } + + pub fn timestamp_str(&self) -> &str { + &self.timestamp + } + + pub fn timestamp(&self) -> i64 { + let (rdx, i) = if self.timestamp.starts_with("0x") { + (16, 2) + } else { + (10, 0) + }; + + i64::from_str_radix(&self.timestamp[i..], rdx).unwrap_or(0) + } +} + #[async_trait] // This is only `Debug` because some tests require that pub trait Blockchain: Debug + Sized + Send + Sync + Unpin + 'static { @@ -138,6 +183,14 @@ pub trait Blockchain: Debug + Sized + Send + Sync + Unpin + 'static { number: BlockNumber, ) -> Result; + async fn refetch_firehose_block( + &self, + logger: &Logger, + cursor: FirehoseCursor, + ) -> Result; + + fn is_refetch_block_required(&self) -> bool; + fn runtime_adapter(&self) -> Arc>; fn is_firehose_supported(&self) -> bool; @@ -157,13 +210,7 @@ pub enum IngestorError { /// An unexpected error occurred. 
#[error("Ingestor error: {0:#}")] - Unknown(Error), -} - -impl From for IngestorError { - fn from(e: Error) -> Self { - IngestorError::Unknown(e) - } + Unknown(#[from] Error), } impl From for IngestorError { @@ -190,9 +237,14 @@ pub trait TriggerFilter: Default + Clone + Send + Sync { fn to_firehose_filter(self) -> Vec; } -pub trait DataSource: - 'static + Sized + Send + Sync + Clone + TryFrom, Error = anyhow::Error> -{ +pub trait DataSource: 'static + Sized + Send + Sync + Clone { + fn from_template_info(info: DataSourceTemplateInfo) -> Result; + + fn from_stored_dynamic_data_source( + template: &C::DataSourceTemplate, + stored: StoredDynamicDataSource, + ) -> Result; + fn address(&self) -> Option<&[u8]>; fn start_block(&self) -> BlockNumber; fn name(&self) -> &str; @@ -225,11 +277,6 @@ pub trait DataSource: fn as_stored_dynamic_data_source(&self) -> StoredDynamicDataSource; - fn from_stored_dynamic_data_source( - template: &C::DataSourceTemplate, - stored: StoredDynamicDataSource, - ) -> Result; - /// Used as part of manifest validation. If there are no errors, return an empty vector. fn validate(&self) -> Vec; } diff --git a/graph/src/blockchain/substreams_block_stream.rs b/graph/src/blockchain/substreams_block_stream.rs index 291e14b210f..e0a875f903b 100644 --- a/graph/src/blockchain/substreams_block_stream.rs +++ b/graph/src/blockchain/substreams_block_stream.rs @@ -162,25 +162,21 @@ fn stream_blocks>( module_name: String, manifest_start_block_num: BlockNumber, manifest_end_block_num: BlockNumber, - _subgraph_current_block: Option, + subgraph_current_block: Option, logger: Logger, metrics: SubstreamsBlockStreamMetrics, ) -> impl Stream, Error>> { let mut latest_cursor = cursor.unwrap_or_else(|| "".to_string()); - let start_block_num = manifest_start_block_num as i64; - let stop_block_num = manifest_end_block_num as u64; + let start_block_num = subgraph_current_block + .as_ref() + .map(|ptr| { + // current_block has already been processed, we start at next block + ptr.block_number() as i64 + 1 + }) + .unwrap_or(manifest_start_block_num as i64); - let request = Request { - start_block_num, - start_cursor: latest_cursor.clone(), - stop_block_num, - fork_steps: vec![StepNew as i32, StepUndo as i32], - irreversibility_condition: "".to_string(), - modules, - output_modules: vec![module_name], - ..Default::default() - }; + let stop_block_num = manifest_end_block_num as u64; // Back off exponentially whenever we encounter a connection error or a stream with bad data let mut backoff = ExponentialBackoff::new(Duration::from_millis(500), Duration::from_secs(45)); @@ -203,7 +199,19 @@ fn stream_blocks>( skip_backoff = false; let mut connect_start = Instant::now(); - let result = endpoint.clone().substreams(request.clone()).await; + let request = Request { + start_block_num, + start_cursor: latest_cursor.clone(), + stop_block_num, + fork_steps: vec![StepNew as i32, StepUndo as i32], + irreversibility_condition: "".to_string(), + modules: modules.clone(), + output_modules: vec![module_name.clone()], + production_mode: true, + ..Default::default() + }; + + let result = endpoint.clone().substreams(request).await; match result { Ok(stream) => { @@ -264,7 +272,7 @@ fn stream_blocks>( metrics.observe_failed_connection(&mut connect_start); - error!(logger, "Unable to connect to endpoint: {:?}", e); + error!(logger, "Unable to connect to endpoint: {:#}", e); } } @@ -287,7 +295,7 @@ async fn process_substreams_response>( ) -> Result>, Error> { let response = match result { Ok(v) => v, - Err(e) => return 
Err(anyhow!("An error occurred while streaming blocks: {:?}", e)), + Err(e) => return Err(anyhow!("An error occurred while streaming blocks: {:#}", e)), }; match response.message { diff --git a/graph/src/components/ethereum/types.rs b/graph/src/components/ethereum/types.rs index ca823aa1f97..69bec236648 100644 --- a/graph/src/components/ethereum/types.rs +++ b/graph/src/components/ethereum/types.rs @@ -101,7 +101,7 @@ pub struct EthereumBlock { pub transaction_receipts: Vec>, } -#[derive(Debug, Default, Clone, PartialEq)] +#[derive(Debug, Default, Clone, PartialEq, Eq)] pub struct EthereumCall { pub from: Address, pub to: Address, diff --git a/graph/src/components/graphql.rs b/graph/src/components/graphql.rs index 78c5472cf71..ed7738308c2 100644 --- a/graph/src/components/graphql.rs +++ b/graph/src/components/graphql.rs @@ -17,7 +17,6 @@ pub enum GraphQlTarget { SubgraphName(String), Deployment(DeploymentHash), } - /// A component that can run GraphqL queries against a [Store](../store/trait.Store.html). #[async_trait] pub trait GraphQlRunner: Send + Sync + 'static { @@ -51,6 +50,7 @@ pub trait GraphQLMetrics: Send + Sync + 'static { fn observe_query_execution(&self, duration: Duration, results: &QueryResults); fn observe_query_parsing(&self, duration: Duration, results: &QueryResults); fn observe_query_validation(&self, duration: Duration, id: &DeploymentHash); + fn observe_query_validation_error(&self, error_codes: Vec<&str>, id: &DeploymentHash); } #[async_trait] diff --git a/graph/src/components/metrics/subgraph.rs b/graph/src/components/metrics/subgraph.rs index c274b934aad..f7dfc82c7a8 100644 --- a/graph/src/components/metrics/subgraph.rs +++ b/graph/src/components/metrics/subgraph.rs @@ -1,18 +1,28 @@ +use prometheus::Counter; + use crate::blockchain::block_stream::BlockStreamMetrics; use crate::prelude::{Gauge, Histogram, HostMetrics, MetricsRegistry}; use std::collections::HashMap; use std::sync::Arc; +use super::stopwatch::StopwatchMetrics; + pub struct SubgraphInstanceMetrics { pub block_trigger_count: Box, pub block_processing_duration: Box, pub block_ops_transaction_duration: Box, + pub firehose_connection_errors: Counter, + pub stopwatch: StopwatchMetrics, trigger_processing_duration: Box, } impl SubgraphInstanceMetrics { - pub fn new(registry: Arc, subgraph_hash: &str) -> Self { + pub fn new( + registry: Arc, + subgraph_hash: &str, + stopwatch: StopwatchMetrics, + ) -> Self { let block_trigger_count = registry .new_deployment_histogram( "deployment_block_trigger_count", @@ -46,11 +56,21 @@ impl SubgraphInstanceMetrics { ) .expect("failed to create `deployment_transact_block_operations_duration_{}"); + let firehose_connection_errors = registry + .new_deployment_counter( + "firehose_connection_errors", + "Measures connections when trying to obtain a firehose connection", + subgraph_hash, + ) + .expect("failed to create firehose_connection_errors counter"); + Self { block_trigger_count, block_processing_duration, trigger_processing_duration, block_ops_transaction_duration, + firehose_connection_errors, + stopwatch, } } diff --git a/graph/src/components/server/admin.rs b/graph/src/components/server/admin.rs deleted file mode 100644 index f160982eb4e..00000000000 --- a/graph/src/components/server/admin.rs +++ /dev/null @@ -1,19 +0,0 @@ -use std::io; -use std::sync::Arc; - -use crate::prelude::Logger; -use crate::prelude::NodeId; - -/// Common trait for JSON-RPC admin server implementations. -pub trait JsonRpcServer
<P>
{ - type Server; - - fn serve( - port: u16, - http_port: u16, - ws_port: u16, - provider: Arc
<P>
, - node_id: NodeId, - logger: Logger, - ) -> Result; -} diff --git a/graph/src/components/server/metrics.rs b/graph/src/components/server/metrics.rs deleted file mode 100644 index 1bd9f4e1fd1..00000000000 --- a/graph/src/components/server/metrics.rs +++ /dev/null @@ -1,12 +0,0 @@ -use futures::prelude::*; - -/// Common trait for index node server implementations. -pub trait MetricsServer { - type ServeError; - - /// Creates a new Tokio task that, when spawned, brings up the index node server. - fn serve( - &mut self, - port: u16, - ) -> Result + Send>, Self::ServeError>; -} diff --git a/graph/src/components/server/mod.rs b/graph/src/components/server/mod.rs index c1af2ceda30..da2b4d47b76 100644 --- a/graph/src/components/server/mod.rs +++ b/graph/src/components/server/mod.rs @@ -4,11 +4,5 @@ pub mod query; /// Component for running GraphQL subscriptions over WebSockets. pub mod subscription; -/// Component for the JSON-RPC admin API. -pub mod admin; - /// Component for the index node server. pub mod index_node; - -/// Components for the Prometheus metrics server. -pub mod metrics; diff --git a/graph/src/components/store/cache.rs b/graph/src/components/store/entity_cache.rs similarity index 85% rename from graph/src/components/store/cache.rs rename to graph/src/components/store/entity_cache.rs index e5e990343e4..50fc357a890 100644 --- a/graph/src/components/store/cache.rs +++ b/graph/src/components/store/entity_cache.rs @@ -1,13 +1,9 @@ use anyhow::anyhow; -use std::collections::{BTreeMap, HashMap}; +use std::collections::HashMap; use std::fmt::{self, Debug}; use std::sync::Arc; -use crate::blockchain::BlockPtr; -use crate::components::store::{ - self as s, Entity, EntityKey, EntityOp, EntityOperation, EntityType, -}; -use crate::data_source::DataSource; +use crate::components::store::{self as s, Entity, EntityKey, EntityOp, EntityOperation}; use crate::prelude::{Schema, ENV_VARS}; use crate::util::lfu_cache::LfuCache; @@ -32,8 +28,6 @@ pub struct EntityCache { // Marks whether updates should go in `handler_updates`. in_handler: bool, - data_sources: Vec, - /// The store is only used to read entities. pub store: Arc, @@ -51,7 +45,6 @@ impl Debug for EntityCache { pub struct ModificationsAndCache { pub modifications: Vec, - pub data_sources: Vec, pub entity_lfu_cache: LfuCache>, } @@ -62,7 +55,6 @@ impl EntityCache { updates: HashMap::new(), handler_updates: HashMap::new(), in_handler: false, - data_sources: vec![], schema: store.input_schema(), store, } @@ -77,7 +69,6 @@ impl EntityCache { updates: HashMap::new(), handler_updates: HashMap::new(), in_handler: false, - data_sources: vec![], schema: store.input_schema(), store, } @@ -109,6 +100,10 @@ impl EntityCache { // Get the current entity, apply any updates from `updates`, then // from `handler_updates`. let mut entity = self.current.get_entity(&*self.store, eref)?; + + // Always test the cache consistency in debug mode. 
+ debug_assert!(entity == self.store.get(&eref).unwrap()); + if let Some(op) = self.updates.get(eref).cloned() { entity = op.apply_to(entity) } @@ -130,13 +125,13 @@ impl EntityCache { pub fn set(&mut self, key: EntityKey, mut entity: Entity) -> Result<(), anyhow::Error> { fn check_id(key: &EntityKey, prev_id: &str) -> Result<(), anyhow::Error> { if prev_id != key.entity_id.as_str() { - return Err(anyhow!( + Err(anyhow!( "Value of {} attribute 'id' conflicts with ID passed to `store.set()`: \ {} != {}", key.entity_type, prev_id, key.entity_id, - )); + )) } else { Ok(()) } @@ -192,12 +187,6 @@ impl EntityCache { } } - /// Add a dynamic data source - pub fn add_data_source(&mut self, data_source: &DataSource) { - self.data_sources - .push(data_source.as_stored_dynamic_data_source()); - } - fn entity_op(&mut self, key: EntityKey, op: EntityOp) { use std::collections::hash_map::Entry; let updates = match self.in_handler { @@ -246,22 +235,8 @@ impl EntityCache { // violation in the database, ensuring correctness let missing = missing.filter(|key| !self.schema.is_immutable(&key.entity_type)); - let mut missing_by_type: BTreeMap<&EntityType, Vec<&str>> = BTreeMap::new(); - for key in missing { - missing_by_type - .entry(&key.entity_type) - .or_default() - .push(&key.entity_id); - } - - for (entity_type, entities) in self.store.get_many(missing_by_type)? { - for entity in entities { - let key = EntityKey { - entity_type: entity_type.clone(), - entity_id: entity.id().unwrap().into(), - }; - self.current.insert(key, Some(entity)); - } + for (entity_key, entity) in self.store.get_many(missing.cloned().collect())? { + self.current.insert(entity_key, Some(entity)); } let mut mods = Vec::new(); @@ -314,7 +289,6 @@ impl EntityCache { Ok(ModificationsAndCache { modifications: mods, - data_sources: self.data_sources, entity_lfu_cache: self.current, }) } @@ -341,20 +315,3 @@ impl LfuCache> { } } } - -/// Represents an item retrieved from an -/// [`EthereumCallCache`](super::EthereumCallCache) implementor. -pub struct CachedEthereumCall { - /// The BLAKE3 hash that uniquely represents this cache item. The way this - /// hash is constructed is an implementation detail. - pub blake3_id: Vec, - - /// Block details related to this Ethereum call. - pub block_ptr: BlockPtr, - - /// The address to the called contract. - pub contract_address: ethabi::Address, - - /// The encoded return value of this call. 
- pub return_value: Vec, -} diff --git a/graph/src/components/store/mod.rs b/graph/src/components/store/mod.rs index eba7f9fab82..c8268846c3b 100644 --- a/graph/src/components/store/mod.rs +++ b/graph/src/components/store/mod.rs @@ -1,8 +1,10 @@ -mod cache; +mod entity_cache; mod err; mod traits; -pub use cache::{CachedEthereumCall, EntityCache, ModificationsAndCache}; +pub use entity_cache::{EntityCache, ModificationsAndCache}; + +use diesel::types::{FromSql, ToSql}; pub use err::StoreError; use itertools::Itertools; pub use traits::*; @@ -11,18 +13,20 @@ use futures::stream::poll_fn; use futures::{Async, Poll, Stream}; use graphql_parser::schema as s; use serde::{Deserialize, Serialize}; +use std::borrow::Borrow; use std::collections::btree_map::Entry; use std::collections::{BTreeMap, BTreeSet, HashSet}; -use std::fmt; use std::fmt::Display; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::{Arc, RwLock}; use std::time::Duration; +use std::{fmt, io}; -use crate::blockchain::{Block, Blockchain}; +use crate::blockchain::Block; use crate::data::store::scalar::Bytes; use crate::data::store::*; use crate::data::value::Word; +use crate::data_source::CausalityRegion; use crate::prelude::*; /// The type name of an entity. This is the string that is used in the @@ -69,6 +73,12 @@ impl<'a> From<&s::InterfaceType<'a, String>> for EntityType { } } +impl Borrow for EntityType { + fn borrow(&self) -> &str { + &self.0 + } +} + // This conversion should only be used in tests since it makes it too // easy to convert random strings into entity types #[cfg(debug_assertions)] @@ -80,6 +90,22 @@ impl From<&str> for EntityType { impl CheapClone for EntityType {} +impl FromSql for EntityType { + fn from_sql(bytes: Option<&[u8]>) -> diesel::deserialize::Result { + let s = >::from_sql(bytes)?; + Ok(EntityType::new(s)) + } +} + +impl ToSql for EntityType { + fn to_sql( + &self, + out: &mut diesel::serialize::Output, + ) -> diesel::serialize::Result { + >::to_sql(self.0.as_str(), out) + } +} + #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct EntityFilterDerivative(bool); @@ -102,13 +128,23 @@ pub struct EntityKey { /// ID of the individual entity. pub entity_id: Word, + + /// This is the causality region of the data source that created the entity. + /// + /// In the case of an entity lookup, this is the causality region of the data source that is + /// doing the lookup. So if the entity exists but was created on a different causality region, + /// the lookup will return empty. + pub causality_region: CausalityRegion, } impl EntityKey { - pub fn data(entity_type: String, entity_id: String) -> Self { + // For use in tests only + #[cfg(debug_assertions)] + pub fn data(entity_type: impl Into, entity_id: impl Into) -> Self { Self { - entity_type: EntityType::new(entity_type), - entity_id: entity_id.into(), + entity_type: EntityType::new(entity_type.into()), + entity_id: entity_id.into().into(), + causality_region: CausalityRegion::ONCHAIN, } } } @@ -244,6 +280,24 @@ impl EntityFilter { } } +/// Holds the information needed to query a store. +#[derive(Clone, Debug, PartialEq)] +pub struct EntityOrderByChildInfo { + /// The attribute of the child entity that is used to order the results. + pub sort_by_attribute: Attribute, + /// The attribute that is used to join to the parent and child entity. + pub join_attribute: Attribute, + /// If true, the child entity is derived from the parent entity. 
+ pub derived: bool, +} + +/// Holds the information needed to order the results of a query based on nested entities. +#[derive(Clone, Debug, PartialEq)] +pub enum EntityOrderByChild { + Object(EntityOrderByChildInfo, EntityType), + Interface(EntityOrderByChildInfo, Vec), +} + /// The order in which entities should be restored from a store. #[derive(Clone, Debug, PartialEq)] pub enum EntityOrder { @@ -251,6 +305,10 @@ pub enum EntityOrder { Ascending(String, ValueType), /// Order descending by the given attribute. Use `id` as a tie-breaker Descending(String, ValueType), + /// Order ascending by the given attribute of a child entity. Use `id` as a tie-breaker + ChildAscending(EntityOrderByChild), + /// Order descending by the given attribute of a child entity. Use `id` as a tie-breaker + ChildDescending(EntityOrderByChild), /// Order by the `id` of the entities Default, /// Do not order at all. This speeds up queries where we know that @@ -433,6 +491,8 @@ pub struct EntityQuery { pub query_id: Option, + pub trace: bool, + _force_use_of_new: (), } @@ -451,6 +511,7 @@ impl EntityQuery { range: EntityRange::first(100), logger: None, query_id: None, + trace: false, _force_use_of_new: (), } } @@ -538,7 +599,7 @@ pub enum EntityChange { impl EntityChange { pub fn for_data(subgraph_id: DeploymentHash, key: EntityKey) -> Self { Self::Data { - subgraph_id: subgraph_id, + subgraph_id, entity_type: key.entity_type, } } @@ -799,7 +860,8 @@ pub struct StoredDynamicDataSource { pub param: Option, pub context: Option, pub creation_block: Option, - pub is_offchain: bool, + pub done_at: Option, + pub causality_region: CausalityRegion, } /// An internal identifer for the specific instance of a deployment. The @@ -1065,10 +1127,7 @@ impl ReadStore for EmptyStore { Ok(None) } - fn get_many( - &self, - _ids_for_type: BTreeMap<&EntityType, Vec<&str>>, - ) -> Result>, StoreError> { + fn get_many(&self, _: BTreeSet) -> Result, StoreError> { Ok(BTreeMap::new()) } @@ -1076,3 +1135,53 @@ impl ReadStore for EmptyStore { self.schema.cheap_clone() } } + +/// An estimate of the number of entities and the number of entity versions +/// in a database table +#[derive(Clone, Debug)] +pub struct VersionStats { + pub entities: i32, + pub versions: i32, + pub tablename: String, + /// The ratio `entities / versions` + pub ratio: f64, +} + +/// Callbacks for `SubgraphStore.prune` so that callers can report progress +/// of the pruning procedure to users +#[allow(unused_variables)] +pub trait PruneReporter: Send + 'static { + fn start_analyze(&mut self) {} + fn start_analyze_table(&mut self, table: &str) {} + fn finish_analyze_table(&mut self, table: &str) {} + fn finish_analyze(&mut self, stats: &[VersionStats]) {} + + fn copy_final_start(&mut self, earliest_block: BlockNumber, final_block: BlockNumber) {} + fn copy_final_batch(&mut self, table: &str, rows: usize, total_rows: usize, finished: bool) {} + fn copy_final_finish(&mut self) {} + + fn start_switch(&mut self) {} + fn copy_nonfinal_start(&mut self, table: &str) {} + fn copy_nonfinal_batch(&mut self, table: &str, rows: usize, total_rows: usize, finished: bool) { + } + fn finish_switch(&mut self) {} + + fn finish_prune(&mut self) {} +} + +/// Represents an item retrieved from an +/// [`EthereumCallCache`](super::EthereumCallCache) implementor. +pub struct CachedEthereumCall { + /// The BLAKE3 hash that uniquely represents this cache item. The way this + /// hash is constructed is an implementation detail. 
+ pub blake3_id: Vec, + + /// Block details related to this Ethereum call. + pub block_ptr: BlockPtr, + + /// The address to the called contract. + pub contract_address: ethabi::Address, + + /// The encoded return value of this call. + pub return_value: Vec, +} diff --git a/graph/src/components/store/traits.rs b/graph/src/components/store/traits.rs index 158e75aa87d..6a6ef2bc79d 100644 --- a/graph/src/components/store/traits.rs +++ b/graph/src/components/store/traits.rs @@ -32,6 +32,8 @@ pub trait EnsLookup: Send + Sync + 'static { /// Find the reverse of keccak256 for `hash` through looking it up in the /// rainbow table. fn find_name(&self, hash: &str) -> Result, StoreError>; + // Check if the rainbow table is filled. + fn is_table_empty(&self) -> Result; } /// An entry point for all operations that require access to the node's storage @@ -130,13 +132,22 @@ pub trait SubgraphStore: Send + Sync + 'static { /// Return a `WritableStore` that is used for indexing subgraphs. Only /// code that is part of indexing a subgraph should ever use this. The /// `logger` will be used to log important messages related to the - /// subgraph + /// subgraph. + /// + /// This function should only be called in situations where no + /// assumptions about the in-memory state of writing has been made; in + /// particular, no assumptions about whether previous writes have + /// actually been committed or not. async fn writable( self: Arc, logger: Logger, deployment: DeploymentId, ) -> Result, StoreError>; + /// Initiate a graceful shutdown of the writable that a previous call to + /// `writable` might have started + async fn stop_subgraph(&self, deployment: &DeploymentLocator) -> Result<(), StoreError>; + /// Return the minimum block pointer of all deployments with this `id` /// that we would use to query or copy from; in particular, this will /// ignore any instances of this deployment that are in the process of @@ -147,18 +158,24 @@ pub trait SubgraphStore: Send + Sync + 'static { /// Find the deployment locators for the subgraph with the given hash fn locators(&self, hash: &str) -> Result, StoreError>; + + /// This migrates subgraphs that existed before the raw_yaml column was added. + async fn set_manifest_raw_yaml( + &self, + hash: &DeploymentHash, + raw_yaml: String, + ) -> Result<(), StoreError>; } pub trait ReadStore: Send + Sync + 'static { /// Looks up an entity using the given store key at the latest block. fn get(&self, key: &EntityKey) -> Result, StoreError>; - /// Look up multiple entities as of the latest block. Returns a map of - /// entities by type. + /// Look up multiple entities as of the latest block. fn get_many( &self, - ids_for_type: BTreeMap<&EntityType, Vec<&str>>, - ) -> Result>, StoreError>; + keys: BTreeSet, + ) -> Result, StoreError>; fn input_schema(&self) -> Arc; } @@ -171,9 +188,9 @@ impl ReadStore for Arc { fn get_many( &self, - ids_for_type: BTreeMap<&EntityType, Vec<&str>>, - ) -> Result>, StoreError> { - (**self).get_many(ids_for_type) + keys: BTreeSet, + ) -> Result, StoreError> { + (**self).get_many(keys) } fn input_schema(&self) -> Arc { @@ -260,6 +277,9 @@ pub trait WritableStore: ReadStore { manifest_idx_and_name: Vec<(u32, String)>, ) -> Result, StoreError>; + /// The maximum assigned causality region. Any higher number is therefore free to be assigned. + async fn causality_region_curr_val(&self) -> Result, StoreError>; + /// Report the name of the shard in which the subgraph is stored. 
This /// should only be used for reporting and monitoring fn shard(&self) -> &str; @@ -396,6 +416,9 @@ pub trait ChainStore: Send + Sync + 'static { &self, block_ptr: &H256, ) -> Result, StoreError>; + + /// Clears call cache of the chain for the given `from` and `to` block number. + async fn clear_call_cache(&self, from: Option, to: Option) -> Result<(), Error>; } pub trait EthereumCallCache: Send + Sync + 'static { diff --git a/graph/src/components/subgraph/host.rs b/graph/src/components/subgraph/host.rs index 27d7d4ca56e..72af7800c66 100644 --- a/graph/src/components/subgraph/host.rs +++ b/graph/src/components/subgraph/host.rs @@ -68,6 +68,14 @@ pub trait RuntimeHost: Send + Sync + 'static { /// Block number in which this host was created. /// Returns `None` for static data sources. fn creation_block_number(&self) -> Option; + + /// Offchain data sources track done_at which is set once the + /// trigger has been processed. + fn done_at(&self) -> Option; + + /// Convenience function to avoid leaking internal representation of + /// mutable number. Calling this on OnChain Datasources is a noop. + fn set_done_at(&self, block: Option); } pub struct HostMetrics { diff --git a/graph/src/components/subgraph/instance.rs b/graph/src/components/subgraph/instance.rs index ec35c829f94..f3df2c672e4 100644 --- a/graph/src/components/subgraph/instance.rs +++ b/graph/src/components/subgraph/instance.rs @@ -21,11 +21,14 @@ pub struct BlockState { pub deterministic_errors: Vec, created_data_sources: Vec>, + // Data sources to be transacted into the store. + pub persisted_data_sources: Vec, + // Data sources created in the current handler. handler_created_data_sources: Vec>, - // offchain data sources to be removed because they've been processed. - pub offchain_to_remove: Vec, + // data source that have been processed. + pub processed_data_sources: Vec, // Marks whether a handler is currently executing. 
in_handler: bool, @@ -37,8 +40,9 @@ impl BlockState { entity_cache: EntityCache::with_current(Arc::new(store), lfu_cache), deterministic_errors: Vec::new(), created_data_sources: Vec::new(), + persisted_data_sources: Vec::new(), handler_created_data_sources: Vec::new(), - offchain_to_remove: Vec::new(), + processed_data_sources: Vec::new(), in_handler: false, } } @@ -50,8 +54,9 @@ impl BlockState { entity_cache, deterministic_errors, created_data_sources, + persisted_data_sources, handler_created_data_sources, - offchain_to_remove, + processed_data_sources, in_handler, } = self; @@ -61,7 +66,8 @@ impl BlockState { } deterministic_errors.extend(other.deterministic_errors); entity_cache.extend(other.entity_cache); - offchain_to_remove.extend(other.offchain_to_remove); + processed_data_sources.extend(other.processed_data_sources); + persisted_data_sources.extend(other.persisted_data_sources); } pub fn has_errors(&self) -> bool { @@ -104,4 +110,8 @@ impl BlockState { assert!(self.in_handler); self.handler_created_data_sources.push(ds); } + + pub fn persist_data_source(&mut self, ds: StoredDynamicDataSource) { + self.persisted_data_sources.push(ds) + } } diff --git a/graph/src/components/subgraph/instance_manager.rs b/graph/src/components/subgraph/instance_manager.rs index 3b1777e3df8..c04fd5237b4 100644 --- a/graph/src/components/subgraph/instance_manager.rs +++ b/graph/src/components/subgraph/instance_manager.rs @@ -16,5 +16,5 @@ pub trait SubgraphInstanceManager: Send + Sync + 'static { manifest: serde_yaml::Mapping, stop_block: Option, ); - fn stop_subgraph(&self, deployment: DeploymentLocator); + async fn stop_subgraph(&self, deployment: DeploymentLocator); } diff --git a/graph/src/components/subgraph/mod.rs b/graph/src/components/subgraph/mod.rs index cd930c85924..6976de1e2d2 100644 --- a/graph/src/components/subgraph/mod.rs +++ b/graph/src/components/subgraph/mod.rs @@ -11,7 +11,7 @@ pub use self::host::{HostMetrics, MappingError, RuntimeHost, RuntimeHostBuilder} pub use self::instance::{BlockState, DataSourceTemplateInfo}; pub use self::instance_manager::SubgraphInstanceManager; pub use self::proof_of_indexing::{ - CausalityRegion, ProofOfIndexing, ProofOfIndexingEvent, ProofOfIndexingFinisher, + PoICausalityRegion, ProofOfIndexing, ProofOfIndexingEvent, ProofOfIndexingFinisher, ProofOfIndexingVersion, SharedProofOfIndexing, }; pub use self::provider::SubgraphAssignmentProvider; diff --git a/graph/src/components/subgraph/proof_of_indexing/mod.rs b/graph/src/components/subgraph/proof_of_indexing/mod.rs index 712b84d0694..adb9ca79959 100644 --- a/graph/src/components/subgraph/proof_of_indexing/mod.rs +++ b/graph/src/components/subgraph/proof_of_indexing/mod.rs @@ -4,7 +4,7 @@ mod reference; pub use event::ProofOfIndexingEvent; pub use online::{ProofOfIndexing, ProofOfIndexingFinisher}; -pub use reference::CausalityRegion; +pub use reference::PoICausalityRegion; use atomic_refcell::AtomicRefCell; use std::sync::Arc; @@ -161,7 +161,7 @@ mod tests { subgraph_id: DeploymentHash::new("test").unwrap(), block_hash: H256::repeat_byte(1), causality_regions: hashmap! { - "eth".to_owned() => CausalityRegion { + "eth".to_owned() => PoICausalityRegion { blocks: vec! [ Block::default(), Block { @@ -188,7 +188,7 @@ mod tests { subgraph_id: DeploymentHash::new("b").unwrap(), block_hash: H256::repeat_byte(3), causality_regions: hashmap! { - "eth".to_owned() => CausalityRegion { + "eth".to_owned() => PoICausalityRegion { blocks: vec! 
[ Block::default(), Block { @@ -226,7 +226,7 @@ mod tests { subgraph_id: DeploymentHash::new("b").unwrap(), block_hash: H256::repeat_byte(3), causality_regions: hashmap! { - "eth".to_owned() => CausalityRegion { + "eth".to_owned() => PoICausalityRegion { blocks: vec! [ Block::default(), Block { @@ -250,7 +250,7 @@ mod tests { Block::default(), ], }, - "ipfs".to_owned() => CausalityRegion { + "ipfs".to_owned() => PoICausalityRegion { blocks: vec! [ Block::default(), Block { @@ -288,7 +288,7 @@ mod tests { subgraph_id: DeploymentHash::new("test").unwrap(), block_hash: H256::repeat_byte(1), causality_regions: hashmap! { - "eth".to_owned() => CausalityRegion { + "eth".to_owned() => PoICausalityRegion { blocks: vec! [ Block::default(), Block { diff --git a/graph/src/components/subgraph/proof_of_indexing/online.rs b/graph/src/components/subgraph/proof_of_indexing/online.rs index 013f2a8f9bb..f9d93151c34 100644 --- a/graph/src/components/subgraph/proof_of_indexing/online.rs +++ b/graph/src/components/subgraph/proof_of_indexing/online.rs @@ -145,7 +145,7 @@ impl BlockEventStream { fn write(&mut self, event: &ProofOfIndexingEvent<'_>) { let children = &[ 1, // kvp -> v - 0, // CausalityRegion.blocks: Vec + 0, // PoICausalityRegion.blocks: Vec self.block_index, // Vec -> [i] 0, // Block.events -> Vec self.vec_length, @@ -276,7 +276,7 @@ impl ProofOfIndexingFinisher { pub fn add_causality_region(&mut self, name: &str, region: &[u8]) { let mut state = Hashers::from_bytes(region); - // Finish the blocks vec by writing kvp[v], CausalityRegion.blocks.len() + // Finish the blocks vec by writing kvp[v], PoICausalityRegion.blocks.len() // + 1 is to account that the length of the blocks array for the genesis block is 1, not 0. state.write(&(self.block_number + 1), &[1, 0]); diff --git a/graph/src/components/subgraph/proof_of_indexing/reference.rs b/graph/src/components/subgraph/proof_of_indexing/reference.rs index 63d9703cc85..5c7d269d7a7 100644 --- a/graph/src/components/subgraph/proof_of_indexing/reference.rs +++ b/graph/src/components/subgraph/proof_of_indexing/reference.rs @@ -10,7 +10,7 @@ use web3::types::{Address, H256}; /// It's just way easier to check that this works, and serves as a kind of /// documentation as a side-benefit. pub struct PoI<'a> { - pub causality_regions: HashMap>, + pub causality_regions: HashMap>, pub subgraph_id: DeploymentHash, pub block_hash: H256, pub indexer: Option
<Address>
, @@ -31,13 +31,13 @@ impl_stable_hash!(PoI<'_> { indexer: indexer_opt_as_bytes }); -pub struct CausalityRegion<'a> { +pub struct PoICausalityRegion<'a> { pub blocks: Vec>, } -impl_stable_hash!(CausalityRegion<'_> {blocks}); +impl_stable_hash!(PoICausalityRegion<'_> {blocks}); -impl CausalityRegion<'_> { +impl PoICausalityRegion<'_> { pub fn from_network(network: &str) -> String { format!("ethereum/{}", network) } diff --git a/graph/src/components/transaction_receipt.rs b/graph/src/components/transaction_receipt.rs index 30b2bb2deb2..dc8eaf6a730 100644 --- a/graph/src/components/transaction_receipt.rs +++ b/graph/src/components/transaction_receipt.rs @@ -6,7 +6,7 @@ use web3::types::{TransactionReceipt, H256, U256, U64}; /// Like web3::types::Receipt, but with fewer fields. -#[derive(Debug, PartialEq)] +#[derive(Debug, PartialEq, Eq)] pub struct LightTransactionReceipt { pub transaction_hash: H256, pub transaction_index: U64, diff --git a/graph/src/data/graphql/object_or_interface.rs b/graph/src/data/graphql/object_or_interface.rs index 48053555e76..7764769a1a0 100644 --- a/graph/src/data/graphql/object_or_interface.rs +++ b/graph/src/data/graphql/object_or_interface.rs @@ -117,7 +117,7 @@ impl<'a> ObjectOrInterface<'a> { ObjectOrInterface::Object(object) => Some(vec![object]), ObjectOrInterface::Interface(interface) => schema .types_for_interface() - .get(&interface.into()) + .get(interface.name.as_str()) .map(|object_types| object_types.iter().collect()), } } @@ -131,7 +131,7 @@ impl<'a> ObjectOrInterface<'a> { ) -> bool { match self { ObjectOrInterface::Object(o) => o.name == typename, - ObjectOrInterface::Interface(i) => types_for_interface[&i.into()] + ObjectOrInterface::Interface(i) => types_for_interface[i.name.as_str()] .iter() .any(|o| o.name == typename), } diff --git a/graph/src/data/query/error.rs b/graph/src/data/query/error.rs index 887005773b6..3e64d37e5c4 100644 --- a/graph/src/data/query/error.rs +++ b/graph/src/data/query/error.rs @@ -12,15 +12,9 @@ use crate::data::subgraph::*; use crate::prelude::q; use crate::{components::store::StoreError, prelude::CacheWeight}; -#[derive(Debug)] +#[derive(Debug, Clone)] pub struct CloneableAnyhowError(Arc); -impl Clone for CloneableAnyhowError { - fn clone(&self) -> Self { - Self(self.0.clone()) - } -} - impl From for CloneableAnyhowError { fn from(f: anyhow::Error) -> Self { Self(Arc::new(f)) @@ -109,9 +103,12 @@ impl QueryExecutionError { | EntityFieldError(_, _) | ListTypesError(_, _) | ListFilterError(_) + | ValueParseError(_, _) | AttributeTypeError(_, _) | EmptySelectionSet(_) | Unimplemented(_) + | EnumCoercionError(_, _, _, _, _) + | ScalarCoercionError(_, _, _, _) | CyclicalFragment(_) | UndefinedFragment(_) | FulltextQueryInvalidSyntax(_) @@ -119,12 +116,9 @@ impl QueryExecutionError { ListValueError(_, _) | ResolveEntitiesError(_) | RangeArgumentsError(_, _, _) - | ValueParseError(_, _) | EntityParseError(_) | StoreError(_) | Timeout - | EnumCoercionError(_, _, _, _, _) - | ScalarCoercionError(_, _, _, _) | AmbiguousDerivedFromResult(_, _, _, _) | TooComplex(_, _) | TooDeep(_) diff --git a/graph/src/data/query/query.rs b/graph/src/data/query/query.rs index 2eaf351d41d..2ca93f0cc43 100644 --- a/graph/src/data/query/query.rs +++ b/graph/src/data/query/query.rs @@ -132,14 +132,16 @@ pub struct Query { pub shape_hash: u64, pub query_text: Arc, pub variables_text: Arc, + pub trace: bool, _force_use_of_new: (), } impl Query { - pub fn new(document: q::Document, variables: Option) -> Self { + pub fn new(document: q::Document, 
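// Why the hand-written `Clone` impl above could be replaced by
// `#[derive(Clone)]`: `Arc<T>` is `Clone` for every `T`, so the derive on
// this non-generic wrapper generates exactly the same reference-counted
// clone. Sketch assuming the `anyhow` crate:
use std::sync::Arc;

#[derive(Debug, Clone)]
struct CloneableAnyhowError(Arc<anyhow::Error>);

fn main() {
    let e = CloneableAnyhowError(Arc::new(anyhow::anyhow!("boom")));
    let e2 = e.clone();
    // Both handles point at the same underlying error.
    assert_eq!(Arc::strong_count(&e.0), 2);
    drop(e2);
}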
variables: Option, trace: bool) -> Self { let shape_hash = shape_hash(&document); - let (query_text, variables_text) = if ENV_VARS.log_gql_timing() + let (query_text, variables_text) = if trace + || ENV_VARS.log_gql_timing() || (ENV_VARS.graphql.enable_validations && ENV_VARS.graphql.silent_graphql_validations) { ( @@ -158,6 +160,7 @@ impl Query { shape_hash, query_text: Arc::new(query_text), variables_text: Arc::new(variables_text), + trace, _force_use_of_new: (), } } diff --git a/graph/src/data/query/result.rs b/graph/src/data/query/result.rs index 6b8bebd8766..46446884185 100644 --- a/graph/src/data/query/result.rs +++ b/graph/src/data/query/result.rs @@ -78,6 +78,14 @@ impl QueryResults { pub fn traces(&self) -> Vec<&Trace> { self.results.iter().map(|res| &res.trace).collect() } + + pub fn errors(&self) -> Vec { + self.results + .iter() + .map(|r| r.errors.clone()) + .flatten() + .collect() + } } impl Serialize for QueryResults { @@ -91,7 +99,14 @@ impl Serialize for QueryResults { if has_errors { len += 1; } - + let first_trace = self + .results + .iter() + .find(|r| !r.trace.is_none()) + .map(|r| &r.trace); + if first_trace.is_some() { + len += 1; + } let mut state = serializer.serialize_struct("QueryResults", len)?; // Serialize data. @@ -117,7 +132,7 @@ impl Serialize for QueryResults { impl Serialize for SerError<'_> { fn serialize(&self, serializer: S) -> Result { let mut seq = serializer.serialize_seq(None)?; - for err in self.0.results.iter().map(|r| &r.errors).flatten() { + for err in self.0.results.iter().flat_map(|r| &r.errors) { seq.serialize_element(err)?; } seq.end() @@ -127,6 +142,9 @@ impl Serialize for QueryResults { state.serialize_field("errors", &SerError(self))?; } + if let Some(trace) = first_trace { + state.serialize_field("trace", trace)?; + } state.end() } } diff --git a/graph/src/data/query/trace.rs b/graph/src/data/query/trace.rs index 11a26e0a7e3..d7ca979489c 100644 --- a/graph/src/data/query/trace.rs +++ b/graph/src/data/query/trace.rs @@ -3,15 +3,18 @@ use std::{ time::Duration, }; -use serde::Serialize; +use serde::{ser::SerializeMap, Serialize}; -use crate::env::ENV_VARS; +use crate::{components::store::BlockNumber, prelude::CheapClone}; -#[derive(Debug, Serialize)] +#[derive(Debug)] pub enum Trace { None, Root { query: Arc, + variables: Arc, + query_id: String, + block: BlockNumber, elapsed: Mutex, children: Vec<(String, Trace)>, }, @@ -31,13 +34,22 @@ impl Default for Trace { } impl Trace { - pub fn root(query: Arc) -> Trace { - if ENV_VARS.log_sql_timing() || ENV_VARS.log_gql_timing() { - return Trace::Root { - query, + pub fn root( + query: &Arc, + variables: &Arc, + query_id: &str, + block: BlockNumber, + do_trace: bool, + ) -> Trace { + if do_trace { + Trace::Root { + query: query.cheap_clone(), + variables: variables.cheap_clone(), + query_id: query_id.to_string(), + block, elapsed: Mutex::new(Duration::from_millis(0)), children: Vec::new(), - }; + } } else { Trace::None } @@ -74,4 +86,58 @@ impl Trace { } } } + + pub fn is_none(&self) -> bool { + match self { + Trace::None => true, + Trace::Root { .. } | Trace::Query { .. 
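// Self-contained sketch of the control flow in `Trace::root` above: whether
// to trace is now decided per request by the `do_trace` flag rather than by
// the global timing env vars, and a disabled trace collapses to
// `Trace::None`. Fields are trimmed to the essentials.
use std::sync::{Arc, Mutex};
use std::time::Duration;

#[derive(Debug)]
enum Trace {
    None,
    Root {
        query: Arc<String>,
        elapsed: Mutex<Duration>,
        children: Vec<(String, Trace)>,
    },
}

impl Trace {
    fn root(query: &Arc<String>, do_trace: bool) -> Trace {
        if do_trace {
            Trace::Root {
                query: query.clone(),
                elapsed: Mutex::new(Duration::from_millis(0)),
                children: Vec::new(),
            }
        } else {
            Trace::None
        }
    }

    fn is_none(&self) -> bool {
        matches!(self, Trace::None)
    }
}

fn main() {
    let q = Arc::new("{ things { id } }".to_string());
    assert!(Trace::root(&q, false).is_none());
    println!("{:?}", Trace::root(&q, true));
}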
} => false, + } + } +} + +impl Serialize for Trace { + fn serialize(&self, ser: S) -> Result + where + S: serde::Serializer, + { + match self { + Trace::None => ser.serialize_none(), + Trace::Root { + query, + variables, + query_id, + block, + elapsed, + children, + } => { + let mut map = ser.serialize_map(Some(children.len() + 2))?; + map.serialize_entry("query", query)?; + if !variables.is_empty() && variables.as_str() != "{}" { + map.serialize_entry("variables", variables)?; + } + map.serialize_entry("query_id", query_id)?; + map.serialize_entry("block", block)?; + map.serialize_entry("elapsed_ms", &elapsed.lock().unwrap().as_millis())?; + for (child, trace) in children { + map.serialize_entry(child, trace)?; + } + map.end() + } + Trace::Query { + query, + elapsed, + entity_count, + children, + } => { + let mut map = ser.serialize_map(Some(children.len() + 3))?; + map.serialize_entry("query", query)?; + map.serialize_entry("elapsed_ms", &elapsed.as_millis())?; + map.serialize_entry("entity_count", entity_count)?; + for (child, trace) in children { + map.serialize_entry(child, trace)?; + } + map.end() + } + } + } } diff --git a/graph/src/data/schema.rs b/graph/src/data/schema.rs index 20de5cdfba4..990613e407b 100644 --- a/graph/src/data/schema.rs +++ b/graph/src/data/schema.rs @@ -163,24 +163,27 @@ impl TryFrom<&str> for FulltextLanguage { } impl FulltextLanguage { - pub fn as_str(&self) -> &'static str { + /// Return the language as a valid SQL string. The string is safe to + /// directly use verbatim in a query, i.e., doesn't require being passed + /// through a bind variable + pub fn as_sql(&self) -> &'static str { match self { - Self::Simple => "simple", - Self::Danish => "danish", - Self::Dutch => "dutch", - Self::English => "english", - Self::Finnish => "finnish", - Self::French => "french", - Self::German => "german", - Self::Hungarian => "hungarian", - Self::Italian => "italian", - Self::Norwegian => "norwegian", - Self::Portugese => "portugese", - Self::Romanian => "romanian", - Self::Russian => "russian", - Self::Spanish => "spanish", - Self::Swedish => "swedish", - Self::Turkish => "turkish", + Self::Simple => "'simple'", + Self::Danish => "'danish'", + Self::Dutch => "'dutch'", + Self::English => "'english'", + Self::Finnish => "'finnish'", + Self::French => "'french'", + Self::German => "'german'", + Self::Hungarian => "'hungarian'", + Self::Italian => "'italian'", + Self::Norwegian => "'norwegian'", + Self::Portugese => "'portugese'", + Self::Romanian => "'romanian'", + Self::Russian => "'russian'", + Self::Spanish => "'spanish'", + Self::Swedish => "'swedish'", + Self::Turkish => "'turkish'", } } } diff --git a/graph/src/data/subgraph/api_version.rs b/graph/src/data/subgraph/api_version.rs index 7972d2727b4..79756399483 100644 --- a/graph/src/data/subgraph/api_version.rs +++ b/graph/src/data/subgraph/api_version.rs @@ -3,8 +3,6 @@ use semver::Version; use std::collections::BTreeSet; use thiserror::Error; -use super::SubgraphManifestValidationError; - pub const API_VERSION_0_0_2: Version = Version::new(0, 0, 2); /// This version adds a new subgraph validation step that rejects manifests whose mappings have @@ -75,13 +73,7 @@ pub(super) fn format_versions(versions: &BTreeSet) -> String { #[derive(Error, Debug, PartialEq)] #[error("Expected a single apiVersion for mappings. 
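// Sketch of the contract change from `as_str` to `as_sql` above: the return
// value is now itself a quoted SQL string literal, so callers can splice it
// verbatim into a query (e.g. a `to_tsvector(...)` call) without a bind
// variable. Only two of the supported languages are shown.
enum FulltextLanguage {
    Simple,
    English,
}

impl FulltextLanguage {
    fn as_sql(&self) -> &'static str {
        match self {
            Self::Simple => "'simple'",
            Self::English => "'english'",
        }
    }
}

fn main() {
    assert_eq!(FulltextLanguage::Simple.as_sql(), "'simple'");
    let query = format!(
        "to_tsvector({}, description)",
        FulltextLanguage::English.as_sql()
    );
    assert_eq!(query, "to_tsvector('english', description)");
}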
Found: {}.", format_versions(.0))] -pub struct DifferentMappingApiVersions(BTreeSet); - -impl From for SubgraphManifestValidationError { - fn from(versions: DifferentMappingApiVersions) -> Self { - SubgraphManifestValidationError::DifferentApiVersions(versions.0) - } -} +pub struct DifferentMappingApiVersions(pub BTreeSet); #[test] fn unified_mapping_api_version_from_iterator() { diff --git a/graph/src/data/subgraph/mod.rs b/graph/src/data/subgraph/mod.rs index 939c5bd3f65..114c2f66936 100644 --- a/graph/src/data/subgraph/mod.rs +++ b/graph/src/data/subgraph/mod.rs @@ -10,13 +10,12 @@ pub mod status; pub use features::{SubgraphFeature, SubgraphFeatureValidationError}; -use anyhow::ensure; use anyhow::{anyhow, Error}; use futures03::{future::try_join3, stream::FuturesOrdered, TryStreamExt as _}; use semver::Version; use serde::{de, ser}; use serde_yaml; -use slog::{debug, info, Logger}; +use slog::{info, Logger}; use stable_hash::{FieldAddress, StableHash}; use stable_hash_legacy::SequenceNumber; use std::{collections::BTreeSet, marker::PhantomData}; @@ -25,10 +24,11 @@ use wasmparser; use web3::types::Address; use crate::{ + bail, blockchain::{BlockPtr, Blockchain, DataSource as _}, components::{ link_resolver::LinkResolver, - store::{DeploymentLocator, StoreError, SubgraphStore}, + store::{StoreError, SubgraphStore}, }, data::{ graphql::TryFromValue, @@ -41,6 +41,7 @@ use crate::{ offchain::OFFCHAIN_KINDS, DataSource, DataSourceTemplate, UnresolvedDataSource, UnresolvedDataSourceTemplate, }, + ensure, prelude::{r, CheapClone, ENV_VARS}, }; @@ -67,7 +68,7 @@ where /// The IPFS hash used to identifiy a deployment externally, i.e., the /// `Qm..` string that `graph-cli` prints when deploying to a subgraph -#[derive(Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)] +#[derive(Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, Default)] pub struct DeploymentHash(String); impl stable_hash_legacy::StableHash for DeploymentHash { @@ -270,7 +271,7 @@ pub enum SubgraphRegistrarError { #[error("deployment assignment unchanged: {0}")] DeploymentAssignmentUnchanged(String), #[error("subgraph registrar internal query error: {0}")] - QueryExecutionError(QueryExecutionError), + QueryExecutionError(#[from] QueryExecutionError), #[error("subgraph registrar error with store: {0}")] StoreError(StoreError), #[error("subgraph validation error: {}", display_vector(.0))] @@ -278,13 +279,7 @@ pub enum SubgraphRegistrarError { #[error("subgraph deployment error: {0}")] SubgraphDeploymentError(StoreError), #[error("subgraph registrar error: {0}")] - Unknown(anyhow::Error), -} - -impl From for SubgraphRegistrarError { - fn from(e: QueryExecutionError) -> Self { - SubgraphRegistrarError::QueryExecutionError(e) - } + Unknown(#[from] anyhow::Error), } impl From for SubgraphRegistrarError { @@ -296,12 +291,6 @@ impl From for SubgraphRegistrarError { } } -impl From for SubgraphRegistrarError { - fn from(e: Error) -> Self { - SubgraphRegistrarError::Unknown(e) - } -} - impl From for SubgraphRegistrarError { fn from(e: SubgraphManifestValidationError) -> Self { SubgraphRegistrarError::ManifestValidationError(vec![e]) @@ -315,16 +304,8 @@ pub enum SubgraphAssignmentProviderError { /// Occurs when attempting to remove a subgraph that's not hosted. 
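// What the `#[from]` attributes above buy: `thiserror` derives the `From`
// impls that were previously written out by hand, so `?` and `.into()` keep
// working at call sites. Minimal sketch assuming the `thiserror` and
// `anyhow` crates; `RegistrarError` abbreviates the enum above.
use thiserror::Error;

#[derive(Error, Debug)]
enum RegistrarError {
    #[error("subgraph registrar error: {0}")]
    Unknown(#[from] anyhow::Error),
}

fn main() {
    // Uses the derived `From<anyhow::Error>` impl.
    let err: RegistrarError = anyhow::anyhow!("boom").into();
    assert!(matches!(err, RegistrarError::Unknown(_)));
    println!("{err}");
}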
#[error("Subgraph with ID {0} already running")] AlreadyRunning(DeploymentHash), - #[error("Subgraph with ID {0} is not running")] - NotRunning(DeploymentLocator), #[error("Subgraph provider error: {0}")] - Unknown(anyhow::Error), -} - -impl From for SubgraphAssignmentProviderError { - fn from(e: Error) -> Self { - SubgraphAssignmentProviderError::Unknown(e) - } + Unknown(#[from] anyhow::Error), } impl From<::diesel::result::Error> for SubgraphAssignmentProviderError { @@ -357,8 +338,8 @@ pub enum SubgraphManifestValidationError { SchemaValidationError(Vec), #[error("the graft base is invalid: {0}")] GraftBaseInvalid(String), - #[error("subgraph must use a single apiVersion across its data sources. Found: {}", format_versions(.0))] - DifferentApiVersions(BTreeSet), + #[error("subgraph must use a single apiVersion across its data sources. Found: {}", format_versions(&(.0).0))] + DifferentApiVersions(#[from] DifferentMappingApiVersions), #[error(transparent)] FeatureValidationError(#[from] SubgraphFeatureValidationError), #[error("data source {0} is invalid: {1}")] @@ -368,19 +349,13 @@ pub enum SubgraphManifestValidationError { #[derive(Error, Debug)] pub enum SubgraphManifestResolveError { #[error("parse error: {0}")] - ParseError(serde_yaml::Error), + ParseError(#[from] serde_yaml::Error), #[error("subgraph is not UTF-8")] NonUtf8, #[error("subgraph is not valid YAML")] InvalidFormat, #[error("resolve error: {0}")] - ResolveError(anyhow::Error), -} - -impl From for SubgraphManifestResolveError { - fn from(e: serde_yaml::Error) -> Self { - SubgraphManifestResolveError::ParseError(e) - } + ResolveError(#[from] anyhow::Error), } /// Data source contexts are conveniently represented as entities. @@ -492,7 +467,7 @@ impl Graft { // // The developer should change their `graft.block` in the manifest // to `base.block - 1` or less. - (Some(ptr), false) if !(self.block < ptr.number) => Err(GraftBaseInvalid(format!( + (Some(ptr), false) if self.block >= ptr.number => Err(GraftBaseInvalid(format!( "failed to graft onto `{}` at block {} since it's not healthy. You can graft it starting at block {} backwards", self.base, self.block, ptr.number - 1 ))), @@ -520,7 +495,7 @@ pub struct BaseSubgraphManifest { } /// SubgraphManifest with IPFS links unresolved -type UnresolvedSubgraphManifest = BaseSubgraphManifest< +pub type UnresolvedSubgraphManifest = BaseSubgraphManifest< C, UnresolvedSchema, UnresolvedDataSource, @@ -638,35 +613,16 @@ impl SubgraphManifest { /// Entry point for resolving a subgraph definition. pub async fn resolve_from_raw( id: DeploymentHash, - mut raw: serde_yaml::Mapping, + raw: serde_yaml::Mapping, resolver: &Arc, logger: &Logger, max_spec_version: semver::Version, ) -> Result { - // Inject the IPFS hash as the ID of the subgraph into the definition. 
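// Numeric sketch of the rewritten graft guard above: `!(self.block <
// ptr.number)` became the equivalent but clearer `self.block >= ptr.number`.
// Grafting onto an unhealthy base is only allowed strictly below the base's
// latest block, i.e. at `base.block - 1` or earlier.
fn graft_allowed_on_unhealthy_base(graft_block: i32, base_latest_block: i32) -> bool {
    // The guard `self.block >= ptr.number` rejects the graft.
    graft_block < base_latest_block
}

fn main() {
    assert!(graft_allowed_on_unhealthy_base(99, 100)); // base.block - 1: ok
    assert!(!graft_allowed_on_unhealthy_base(100, 100)); // rejected
    assert!(!graft_allowed_on_unhealthy_base(101, 100)); // rejected
}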
- raw.insert("id".into(), id.to_string().into()); - - // Parse the YAML data into an UnresolvedSubgraphManifest - let unresolved: UnresolvedSubgraphManifest = serde_yaml::from_value(raw.into())?; - - debug!(logger, "Features {:?}", unresolved.features); + let unresolved = UnresolvedSubgraphManifest::parse(id, raw)?; let resolved = unresolved .resolve(resolver, logger, max_spec_version) - .await - .map_err(SubgraphManifestResolveError::ResolveError)?; - - if (resolved.spec_version < SPEC_VERSION_0_0_7) - && resolved - .data_sources - .iter() - .any(|ds| OFFCHAIN_KINDS.contains(&ds.kind())) - { - return Err(SubgraphManifestResolveError::ResolveError(anyhow!( - "Offchain data sources not supported prior to {}", - SPEC_VERSION_0_0_7 - ))); - } + .await?; Ok(resolved) } @@ -709,15 +665,37 @@ impl SubgraphManifest { ) -> Result { UnifiedMappingApiVersion::try_from_versions(self.api_versions()) } + + pub fn template_idx_and_name(&self) -> impl Iterator + '_ { + // We cannot include static data sources in the map because a static data source and a + // template may have the same name in the manifest. Duplicated with + // `UnresolvedSubgraphManifest::template_idx_and_name`. + let ds_len = self.data_sources.len() as u32; + self.templates + .iter() + .map(|t| t.name().to_owned()) + .enumerate() + .map(move |(idx, name)| (ds_len + idx as u32, name)) + } } impl UnresolvedSubgraphManifest { + pub fn parse( + id: DeploymentHash, + mut raw: serde_yaml::Mapping, + ) -> Result { + // Inject the IPFS hash as the ID of the subgraph into the definition. + raw.insert("id".into(), id.to_string().into()); + + serde_yaml::from_value(raw.into()).map_err(Into::into) + } + pub async fn resolve( self, resolver: &Arc, logger: &Logger, max_spec_version: semver::Version, - ) -> Result, anyhow::Error> { + ) -> Result, SubgraphManifestResolveError> { let UnresolvedSubgraphManifest { id, spec_version, @@ -738,29 +716,29 @@ impl UnresolvedSubgraphManifest { max_spec_version, id, spec_version - )); + ).into()); } let ds_count = data_sources.len(); if ds_count as u64 + templates.len() as u64 > u32::MAX as u64 { - return Err(anyhow!( - "Subgraph has too many declared data sources and templates", - )); + return Err( + anyhow!("Subgraph has too many declared data sources and templates",).into(), + ); } let (schema, data_sources, templates) = try_join3( - schema.resolve(id.clone(), &resolver, logger), + schema.resolve(id.clone(), resolver, logger), data_sources .into_iter() .enumerate() - .map(|(idx, ds)| ds.resolve(&resolver, logger, idx as u32)) + .map(|(idx, ds)| ds.resolve(resolver, logger, idx as u32)) .collect::>() .try_collect::>(), templates .into_iter() .enumerate() .map(|(idx, template)| { - template.resolve(&resolver, logger, ds_count as u32 + idx as u32) + template.resolve(resolver, logger, ds_count as u32 + idx as u32) }) .collect::>() .try_collect::>(), @@ -778,6 +756,17 @@ impl UnresolvedSubgraphManifest { ); } + if spec_version < SPEC_VERSION_0_0_7 + && data_sources + .iter() + .any(|ds| OFFCHAIN_KINDS.contains(&ds.kind())) + { + bail!( + "Offchain data sources not supported prior to {}", + SPEC_VERSION_0_0_7 + ); + } + Ok(SubgraphManifest { id, spec_version, diff --git a/graph/src/data/subgraph/schema.rs b/graph/src/data/subgraph/schema.rs index c8e5d7f80df..11f4ed6cfea 100644 --- a/graph/src/data/subgraph/schema.rs +++ b/graph/src/data/subgraph/schema.rs @@ -1,10 +1,11 @@ //! Entity types that contain the graph-node state. 
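// Self-contained sketch of what `UnresolvedSubgraphManifest::parse` above
// does: inject the deployment id into the raw YAML mapping, then
// deserialize. Assumes the `serde` and `serde_yaml` crates; `Minimal` is a
// hypothetical stand-in for the real manifest type.
use serde::Deserialize;

#[derive(Debug, Deserialize)]
struct Minimal {
    id: String,
    #[serde(rename = "specVersion")]
    spec_version: String,
}

fn main() -> Result<(), serde_yaml::Error> {
    let mut raw: serde_yaml::Mapping = serde_yaml::from_str("specVersion: 0.0.7")?;
    // Inject the IPFS hash as the ID of the subgraph into the definition.
    raw.insert("id".into(), "QmExample".to_string().into());
    let manifest: Minimal = serde_yaml::from_value(raw.into())?;
    assert_eq!(manifest.id, "QmExample");
    assert_eq!(manifest.spec_version, "0.0.7");
    Ok(())
}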
-use anyhow::{anyhow, Error}; +use anyhow::{anyhow, bail, Error}; use hex; use lazy_static::lazy_static; use rand::rngs::OsRng; use rand::Rng; +use std::collections::BTreeSet; use std::str::FromStr; use std::{fmt, fmt::Display}; @@ -21,7 +22,8 @@ lazy_static! { pub static ref POI_OBJECT: EntityType = EntityType::new("Poi$".to_string()); } -#[derive(Copy, Clone, PartialEq, Eq, Debug)] +#[derive(Copy, Clone, PartialEq, Eq, Debug, Deserialize)] +#[serde(rename_all = "lowercase")] pub enum SubgraphHealth { /// Syncing without errors. Healthy, @@ -102,7 +104,7 @@ impl TryFromValue for SubgraphHealth { /// The deployment data that is needed to create a deployment pub struct DeploymentCreate { pub manifest: SubgraphManifestEntity, - pub earliest_block: Option, + pub start_block: Option, pub graft_base: Option, pub graft_block: Option, pub debug_fork: Option, @@ -110,12 +112,13 @@ pub struct DeploymentCreate { impl DeploymentCreate { pub fn new( + raw_manifest: String, source_manifest: &SubgraphManifest, - earliest_block: Option, + start_block: Option, ) -> Self { Self { - manifest: SubgraphManifestEntity::from(source_manifest), - earliest_block: earliest_block.cheap_clone(), + manifest: SubgraphManifestEntity::new(raw_manifest, source_manifest, Vec::new()), + start_block: start_block.cheap_clone(), graft_base: None, graft_block: None, debug_fork: None, @@ -134,6 +137,15 @@ impl DeploymentCreate { self.debug_fork = fork; self } + + pub fn entities_with_causality_region( + mut self, + entities_with_causality_region: BTreeSet, + ) -> Self { + self.manifest.entities_with_causality_region = + entities_with_causality_region.into_iter().collect(); + self + } } /// The representation of a subgraph deployment when reading an existing @@ -146,7 +158,10 @@ pub struct SubgraphDeploymentEntity { pub synced: bool, pub fatal_error: Option, pub non_fatal_errors: Vec, - pub earliest_block: Option, + /// The earliest block for which we have data + pub earliest_block_number: BlockNumber, + /// The block at which indexing initially started + pub start_block: Option, pub latest_block: Option, pub graft_base: Option, pub graft_block: Option, @@ -163,21 +178,61 @@ pub struct SubgraphManifestEntity { pub repository: Option, pub features: Vec, pub schema: String, + pub raw_yaml: Option, + pub entities_with_causality_region: Vec, } -impl<'a, C: Blockchain> From<&'a super::SubgraphManifest> for SubgraphManifestEntity { - fn from(manifest: &'a super::SubgraphManifest) -> Self { +impl SubgraphManifestEntity { + pub fn new( + raw_yaml: String, + manifest: &super::SubgraphManifest, + entities_with_causality_region: Vec, + ) -> Self { Self { spec_version: manifest.spec_version.to_string(), description: manifest.description.clone(), repository: manifest.repository.clone(), features: manifest.features.iter().map(|f| f.to_string()).collect(), schema: manifest.schema.document.clone().to_string(), + raw_yaml: Some(raw_yaml), + entities_with_causality_region, + } + } + + pub fn template_idx_and_name(&self) -> Result, Error> { + #[derive(Debug, Deserialize)] + struct MinimalDs { + name: String, } + #[derive(Debug, Deserialize)] + #[serde(rename_all = "camelCase")] + struct MinimalManifest { + data_sources: Vec, + #[serde(default)] + templates: Vec, + } + + let raw_yaml = match &self.raw_yaml { + Some(raw_yaml) => raw_yaml, + None => bail!("raw_yaml not present"), + }; + + let manifest: MinimalManifest = serde_yaml::from_str(raw_yaml)?; + + let ds_len = manifest.data_sources.len() as i32; + let template_idx_and_name = manifest + 
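// Sketch of the new `Deserialize` derive on `SubgraphHealth` above:
// `#[serde(rename_all = "lowercase")]` makes the variants parse from their
// lowercase names. The variant list follows the enum's Healthy/Unhealthy/
// Failed states; assumes the `serde` and `serde_yaml` crates.
use serde::Deserialize;

#[derive(Copy, Clone, PartialEq, Eq, Debug, Deserialize)]
#[serde(rename_all = "lowercase")]
enum SubgraphHealth {
    Healthy,
    Unhealthy,
    Failed,
}

fn main() {
    let h: SubgraphHealth = serde_yaml::from_str("healthy").unwrap();
    assert_eq!(h, SubgraphHealth::Healthy);
}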
.templates + .iter() + .map(|t| t.name.to_owned()) + .enumerate() + .map(move |(idx, name)| (ds_len + idx as i32, name)) + .collect(); + + Ok(template_idx_and_name) } } -#[derive(Clone, Debug)] +#[derive(Clone, Debug, Default, PartialEq, Eq)] pub struct SubgraphError { pub subgraph_id: DeploymentHash, pub message: String, diff --git a/graph/src/data/value.rs b/graph/src/data/value.rs index fc62ef91be9..ddb2d0a7134 100644 --- a/graph/src/data/value.rs +++ b/graph/src/data/value.rs @@ -43,6 +43,12 @@ impl From for Word { } } +impl From for String { + fn from(w: Word) -> Self { + w.0.into() + } +} + impl Serialize for Word { fn serialize(&self, serializer: S) -> Result where @@ -137,7 +143,7 @@ impl Iterator for ObjectOwningIter { type Item = (Word, Value); fn next(&mut self) -> Option { - while let Some(entry) = self.iter.next() { + for entry in self.iter.by_ref() { if let Some(key) = entry.key { return Some((key, entry.value)); } @@ -173,7 +179,7 @@ impl<'a> Iterator for ObjectIter<'a> { type Item = (&'a str, &'a Value); fn next(&mut self) -> Option { - while let Some(entry) = self.iter.next() { + for entry in self.iter.by_ref() { if let Some(key) = &entry.key { return Some((key.as_str(), &entry.value)); } diff --git a/graph/src/data_source/causality_region.rs b/graph/src/data_source/causality_region.rs new file mode 100644 index 00000000000..808538b6027 --- /dev/null +++ b/graph/src/data_source/causality_region.rs @@ -0,0 +1,83 @@ +use diesel::{ + pg::Pg, + serialize::Output, + sql_types::Integer, + types::{FromSql, ToSql}, + FromSqlRow, +}; +use std::fmt; +use std::io; + +use crate::components::subgraph::Entity; + +/// The causality region of a data source. All onchain data sources share the same causality region, +/// but each offchain data source is assigned its own. This isolates offchain data sources from +/// onchain and from each other. +/// +/// The isolation rules are: +/// 1. A data source cannot read an entity from a different causality region. +/// 2. A data source cannot update or overwrite an entity from a different causality region. +/// +/// This necessary for determinism because offchain data sources don't have a deterministic order of +/// execution, for example an IPFS file may become available at any point in time. The isolation +/// rules make the indexing result reproducible, given a set of available files. +#[derive(Debug, Copy, Clone, PartialEq, Eq, FromSqlRow, Hash, PartialOrd, Ord)] +pub struct CausalityRegion(i32); + +impl fmt::Display for CausalityRegion { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.0.fmt(f) + } +} + +impl FromSql for CausalityRegion { + fn from_sql(bytes: Option<&[u8]>) -> diesel::deserialize::Result { + >::from_sql(bytes).map(CausalityRegion) + } +} + +impl ToSql for CausalityRegion { + fn to_sql(&self, out: &mut Output) -> diesel::serialize::Result { + >::to_sql(&self.0, out) + } +} + +impl CausalityRegion { + /// The causality region of all onchain data sources. + pub const ONCHAIN: CausalityRegion = CausalityRegion(0); + + pub const fn next(self) -> Self { + CausalityRegion(self.0 + 1) + } + + pub fn from_entity(entity: &Entity) -> Self { + entity + .get("causality_region") + .and_then(|v| v.as_int()) + .map(CausalityRegion) + .unwrap_or(CausalityRegion::ONCHAIN) + } +} + +/// A subgraph will assign causality regions to offchain data sources from a sequence. 
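// The indexing convention implemented in both `template_idx_and_name`
// methods above: templates are numbered after all static data sources, so a
// template's manifest index is `data_sources.len() + its position`.
// Self-contained sketch:
fn template_idx_and_name(ds_len: i32, templates: &[&str]) -> Vec<(i32, String)> {
    templates
        .iter()
        .map(|t| t.to_string())
        .enumerate()
        .map(|(idx, name)| (ds_len + idx as i32, name))
        .collect()
}

fn main() {
    // Two static data sources, then two templates.
    let got = template_idx_and_name(2, &["FileA", "FileB"]);
    assert_eq!(got, vec![(2, "FileA".to_string()), (3, "FileB".to_string())]);
}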
+pub struct CausalityRegionSeq(pub CausalityRegion); + +impl CausalityRegionSeq { + /// Create a new sequence with the current value set to `ONCHAIN`, which is 0, therefore the + /// first produced value will be `ONCHAIN + 1`, which is 1. + const fn new() -> Self { + CausalityRegionSeq(CausalityRegion::ONCHAIN) + } + + /// A sequence with the current value set to `cr`. If `cr` is `None`, then the current value is + /// set to `ONCHAIN`, which is 0. The next produced value will be `cr + 1`. + pub fn from_current(cr: Option) -> CausalityRegionSeq { + cr.map(CausalityRegionSeq) + .unwrap_or(CausalityRegionSeq::new()) + } + + pub fn next_val(&mut self) -> CausalityRegion { + self.0 = self.0.next(); + self.0 + } +} diff --git a/graph/src/data_source/mod.rs b/graph/src/data_source/mod.rs index 2d92a27a80c..aba6498682b 100644 --- a/graph/src/data_source/mod.rs +++ b/graph/src/data_source/mod.rs @@ -1,5 +1,11 @@ +pub mod causality_region; pub mod offchain; +pub use causality_region::CausalityRegion; + +#[cfg(test)] +mod tests; + use crate::{ blockchain::{ BlockPtr, Blockchain, DataSource as _, DataSourceTemplate as _, TriggerData as _, @@ -7,8 +13,7 @@ use crate::{ }, components::{ link_resolver::LinkResolver, - store::{BlockNumber, StoredDynamicDataSource}, - subgraph::DataSourceTemplateInfo, + store::{BlockNumber, EntityType, StoredDynamicDataSource}, }, data_source::offchain::OFFCHAIN_KINDS, prelude::{CheapClone as _, DataSourceContext}, @@ -18,6 +23,7 @@ use semver::Version; use serde::{de::IntoDeserializer as _, Deserialize, Deserializer}; use slog::{Logger, SendSyncRefUnwindSafeKV}; use std::{collections::BTreeMap, fmt, sync::Arc}; +use thiserror::Error; #[derive(Debug)] pub enum DataSource { @@ -25,21 +31,53 @@ pub enum DataSource { Offchain(offchain::DataSource), } -impl TryFrom> for DataSource { - type Error = Error; +#[derive(Error, Debug)] +pub enum DataSourceCreationError { + /// The creation of the data source should be ignored. + #[error("ignoring data source creation due to invalid parameter: '{0}', error: {1:#}")] + Ignore(String, Error), - fn try_from(info: DataSourceTemplateInfo) -> Result { - match &info.template { - DataSourceTemplate::Onchain(_) => { - C::DataSource::try_from(info).map(DataSource::Onchain) - } - DataSourceTemplate::Offchain(_) => { - offchain::DataSource::try_from(info).map(DataSource::Offchain) + /// Other errors. + #[error("error creating data source: {0:#}")] + Unknown(#[from] Error), +} + +/// Which entity types a data source can read and write to. +/// +/// Currently this is only enforced on offchain data sources and templates, based on the `entities` +/// key in the manifest. This informs which entity tables need an explicit `causality_region` column +/// and which will always have `causality_region == 0`. +/// +/// Note that this is just an optimization and not sufficient for causality region isolation, since +/// generally the causality region is a property of the entity, not of the entity type. 
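// Usage sketch for `CausalityRegionSeq` above, copied here to be
// self-contained: the sequence starts at `ONCHAIN` (0), so the first
// offchain data source gets causality region 1, and restoring from a stored
// current value continues right after it.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
struct CausalityRegion(i32);

impl CausalityRegion {
    const ONCHAIN: CausalityRegion = CausalityRegion(0);
    const fn next(self) -> Self {
        CausalityRegion(self.0 + 1)
    }
}

struct CausalityRegionSeq(CausalityRegion);

impl CausalityRegionSeq {
    fn from_current(cr: Option<CausalityRegion>) -> Self {
        CausalityRegionSeq(cr.unwrap_or(CausalityRegion::ONCHAIN))
    }
    fn next_val(&mut self) -> CausalityRegion {
        self.0 = self.0.next();
        self.0
    }
}

fn main() {
    // A fresh subgraph: next values are 1, 2, ...
    let mut seq = CausalityRegionSeq::from_current(None);
    assert_eq!(seq.next_val(), CausalityRegion(1));
    assert_eq!(seq.next_val(), CausalityRegion(2));

    // Restarting from a stored current value of 5 continues at 6.
    let mut seq = CausalityRegionSeq::from_current(Some(CausalityRegion(5)));
    assert_eq!(seq.next_val(), CausalityRegion(6));
}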
+/// +/// See also: entity-type-access +pub enum EntityTypeAccess { + Any, + Restriced(Vec), +} + +impl fmt::Display for EntityTypeAccess { + fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { + match self { + Self::Any => write!(f, "Any"), + Self::Restriced(entities) => { + let strings = entities.iter().map(|e| e.as_str()).collect::>(); + write!(f, "{}", strings.join(", ")) } } } } +impl EntityTypeAccess { + pub fn allows(&self, entity_type: &EntityType) -> bool { + match self { + Self::Any => true, + Self::Restriced(types) => types.contains(entity_type), + } + } +} + impl DataSource { pub fn as_onchain(&self) -> Option<&C::DataSource> { match self { @@ -104,6 +142,15 @@ impl DataSource { } } + pub fn entities(&self) -> EntityTypeAccess { + match self { + // Note: Onchain data sources have an `entities` field in the manifest, but it has never + // been enforced. + Self::Onchain(_) => EntityTypeAccess::Any, + Self::Offchain(ds) => EntityTypeAccess::Restriced(ds.mapping.entities.clone()), + } + } + pub fn match_and_decode( &self, trigger: &TriggerData, @@ -125,10 +172,7 @@ impl DataSource { pub fn is_duplicate_of(&self, other: &Self) -> bool { match (self, other) { (Self::Onchain(a), Self::Onchain(b)) => a.is_duplicate_of(b), - (Self::Offchain(a), Self::Offchain(b)) => { - // See also: data-source-is-duplicate-of - a.manifest_idx == b.manifest_idx && a.source == b.source && a.context == b.context - } + (Self::Offchain(a), Self::Offchain(b)) => a.is_duplicate_of(b), _ => false, } } @@ -162,6 +206,13 @@ impl DataSource { Self::Offchain(_) => vec![], } } + + pub fn causality_region(&self) -> CausalityRegion { + match self { + Self::Onchain(_) => CausalityRegion::ONCHAIN, + Self::Offchain(ds) => ds.causality_region, + } + } } #[derive(Debug)] @@ -182,10 +233,12 @@ impl UnresolvedDataSource { .resolve(resolver, logger, manifest_idx) .await .map(DataSource::Onchain), - Self::Offchain(unresolved) => unresolved - .resolve(resolver, logger, manifest_idx) - .await - .map(DataSource::Offchain), + Self::Offchain(_unresolved) => { + anyhow::bail!( + "static file data sources are not yet supported, \\ + for details see https://github.com/graphprotocol/graph-node/issues/3864" + ); + } } } } @@ -204,6 +257,13 @@ impl DataSourceTemplate { } } + pub fn as_offchain(&self) -> Option<&offchain::DataSourceTemplate> { + match self { + Self::Onchain(_) => None, + Self::Offchain(t) => Some(&t), + } + } + pub fn into_onchain(self) -> Option { match self { Self::Onchain(ds) => Some(ds), diff --git a/graph/src/data_source/offchain.rs b/graph/src/data_source/offchain.rs index a36ca1f3fa1..b17fc31b33b 100644 --- a/graph/src/data_source/offchain.rs +++ b/graph/src/data_source/offchain.rs @@ -1,25 +1,30 @@ use crate::{ + bail, blockchain::{BlockPtr, Blockchain}, components::{ link_resolver::LinkResolver, - store::{BlockNumber, StoredDynamicDataSource}, + store::{BlockNumber, EntityType, StoredDynamicDataSource}, subgraph::DataSourceTemplateInfo, }, data::store::scalar::Bytes, data_source, + ipfs_client::CidFile, prelude::{DataSourceContext, Link}, }; use anyhow::{self, Context, Error}; -use cid::Cid; use serde::Deserialize; use slog::{info, Logger}; -use std::{fmt, sync::Arc}; +use std::{ + fmt, + sync::{atomic::AtomicI32, Arc}, +}; -use super::TriggerWithHandler; +use super::{CausalityRegion, DataSourceCreationError, TriggerWithHandler}; -pub const OFFCHAIN_KINDS: &'static [&'static str] = &["file/ipfs"]; +pub const OFFCHAIN_KINDS: &[&str] = &["file/ipfs"]; +const NOT_DONE_VALUE: i32 = -1; -#[derive(Clone, 
Debug)] +#[derive(Debug, Clone)] pub struct DataSource { pub kind: String, pub name: String, @@ -28,43 +33,104 @@ pub struct DataSource { pub mapping: Mapping, pub context: Arc>, pub creation_block: Option, + done_at: Arc, + pub causality_region: CausalityRegion, } -impl TryFrom> for DataSource { - type Error = Error; +impl DataSource { + pub fn new( + kind: String, + name: String, + manifest_idx: u32, + source: Source, + mapping: Mapping, + context: Arc>, + creation_block: Option, + causality_region: CausalityRegion, + ) -> Self { + Self { + kind, + name, + manifest_idx, + source, + mapping, + context, + creation_block, + done_at: Arc::new(AtomicI32::new(NOT_DONE_VALUE)), + causality_region, + } + } + + // mark this data source as processed. + pub fn mark_processed_at(&self, block_no: i32) { + assert!(block_no != NOT_DONE_VALUE); + self.done_at + .store(block_no, std::sync::atomic::Ordering::SeqCst); + } - fn try_from(info: DataSourceTemplateInfo) -> Result { + // returns `true` if the data source is processed. + pub fn is_processed(&self) -> bool { + self.done_at.load(std::sync::atomic::Ordering::SeqCst) != NOT_DONE_VALUE + } + + pub fn done_at(&self) -> Option { + match self.done_at.load(std::sync::atomic::Ordering::SeqCst) { + NOT_DONE_VALUE => None, + n => Some(n), + } + } + + pub fn set_done_at(&self, block: Option) { + let value = block.unwrap_or(NOT_DONE_VALUE); + + self.done_at + .store(value, std::sync::atomic::Ordering::SeqCst); + } +} + +impl DataSource { + pub fn from_template_info( + info: DataSourceTemplateInfo, + causality_region: CausalityRegion, + ) -> Result { let template = match info.template { data_source::DataSourceTemplate::Offchain(template) => template, data_source::DataSourceTemplate::Onchain(_) => { - anyhow::bail!("Cannot create offchain data source from onchain template") + bail!("Cannot create offchain data source from onchain template") } }; - let source = info.params.get(0).ok_or(anyhow::anyhow!( + let source = info.params.into_iter().next().ok_or(anyhow::anyhow!( "Failed to create data source from template `{}`: source parameter is missing", template.name ))?; + + let source = match source.parse() { + Ok(source) => Source::Ipfs(source), + + // Ignore data sources created with an invalid CID. 
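// Sketch of the `done_at` bookkeeping above: one shared `AtomicI32` with the
// sentinel `-1` meaning "not yet processed", so every clone of the data
// source observes completion without locking. Trimmed to the relevant parts.
use std::sync::atomic::{AtomicI32, Ordering};
use std::sync::Arc;

const NOT_DONE_VALUE: i32 = -1;

#[derive(Clone)]
struct Ds {
    done_at: Arc<AtomicI32>,
}

impl Ds {
    fn new() -> Self {
        Ds {
            done_at: Arc::new(AtomicI32::new(NOT_DONE_VALUE)),
        }
    }
    fn mark_processed_at(&self, block_no: i32) {
        assert!(block_no != NOT_DONE_VALUE);
        self.done_at.store(block_no, Ordering::SeqCst);
    }
    fn is_processed(&self) -> bool {
        self.done_at.load(Ordering::SeqCst) != NOT_DONE_VALUE
    }
}

fn main() {
    let ds = Ds::new();
    let clone = ds.clone();
    assert!(!clone.is_processed());
    ds.mark_processed_at(7);
    // The clone shares the same atomic, so it sees the update.
    assert!(clone.is_processed());
}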
+ Err(e) => return Err(DataSourceCreationError::Ignore(source, e)), + }; + Ok(Self { kind: template.kind.clone(), name: template.name.clone(), manifest_idx: template.manifest_idx, - source: Source::Ipfs(source.parse()?), - mapping: template.mapping.clone(), + source, + mapping: template.mapping, context: Arc::new(info.context), creation_block: Some(info.creation_block), + done_at: Arc::new(AtomicI32::new(NOT_DONE_VALUE)), + causality_region, }) } -} -impl DataSource { pub fn match_and_decode( &self, trigger: &TriggerData, ) -> Option>> { - if self.source != trigger.source { + if self.source != trigger.source || self.is_processed() { return None; } - Some(TriggerWithHandler::new( data_source::MappingTrigger::Offchain(trigger.clone()), self.mapping.handler.clone(), @@ -74,19 +140,29 @@ impl DataSource { pub fn as_stored_dynamic_data_source(&self) -> StoredDynamicDataSource { let param = match self.source { - Source::Ipfs(link) => Bytes::from(link.to_bytes()), + Source::Ipfs(ref link) => Bytes::from(link.to_bytes()), }; + + let done_at = self.done_at.load(std::sync::atomic::Ordering::SeqCst); + let done_at = if done_at == NOT_DONE_VALUE { + None + } else { + Some(done_at) + }; + let context = self .context .as_ref() .as_ref() .map(|ctx| serde_json::to_value(&ctx).unwrap()); + StoredDynamicDataSource { manifest_idx: self.manifest_idx, param: Some(param), context, creation_block: self.creation_block, - is_offchain: true, + done_at, + causality_region: self.causality_region, } } @@ -94,17 +170,31 @@ impl DataSource { template: &DataSourceTemplate, stored: StoredDynamicDataSource, ) -> Result { - let param = stored.param.context("no param on stored data source")?; - let source = Source::Ipfs(Cid::try_from(param.as_slice().to_vec())?); - let context = Arc::new(stored.context.map(serde_json::from_value).transpose()?); + let StoredDynamicDataSource { + manifest_idx, + param, + context, + creation_block, + done_at, + causality_region, + } = stored; + + let param = param.context("no param on stored data source")?; + let cid_file = CidFile::try_from(param)?; + + let source = Source::Ipfs(cid_file); + let context = Arc::new(context.map(serde_json::from_value).transpose()?); + Ok(Self { kind: template.kind.clone(), name: template.name.clone(), - manifest_idx: stored.manifest_idx, + manifest_idx, source, mapping: template.mapping.clone(), context, - creation_block: stored.creation_block, + creation_block, + done_at: Arc::new(AtomicI32::new(done_at.unwrap_or(NOT_DONE_VALUE))), + causality_region, }) } @@ -112,21 +202,50 @@ impl DataSource { /// used as the value to be returned to mappings from the `dataSource.address()` host function. pub fn address(&self) -> Option> { match self.source { - Source::Ipfs(cid) => Some(cid.to_bytes()), + Source::Ipfs(ref cid) => Some(cid.to_bytes()), } } + + pub(super) fn is_duplicate_of(&self, b: &DataSource) -> bool { + let DataSource { + // Inferred from the manifest_idx + kind: _, + name: _, + mapping: _, + + manifest_idx, + source, + context, + + // We want to deduplicate across done status or creation block. + done_at: _, + creation_block: _, + + // The causality region is also ignored, to be able to detect duplicated file data + // sources. + // + // Note to future: This will become more complicated if we allow for example file data + // sources to create other file data sources, because which one is created first (the + // original) and which is created later (the duplicate) is no longer deterministic. 
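// Hedged sketch of how a caller can treat the two `DataSourceCreationError`
// variants (the real handling lives in the subgraph runner, which is not
// part of this diff): `Ignore` skips the data source, `Unknown` propagates.
use thiserror::Error;

#[derive(Error, Debug)]
enum DataSourceCreationError {
    #[error("ignoring data source creation due to invalid parameter: '{0}', error: {1:#}")]
    Ignore(String, anyhow::Error),
    #[error("error creating data source: {0:#}")]
    Unknown(#[from] anyhow::Error),
}

fn handle(res: Result<(), DataSourceCreationError>) -> Result<(), anyhow::Error> {
    match res {
        Ok(()) => Ok(()),
        Err(DataSourceCreationError::Ignore(param, e)) => {
            eprintln!("skipping data source, bad param {param}: {e:#}");
            Ok(())
        }
        Err(DataSourceCreationError::Unknown(e)) => Err(e),
    }
}

fn main() {
    let ignored = Err(DataSourceCreationError::Ignore(
        "not-a-cid".to_string(),
        anyhow::anyhow!("invalid CID"),
    ));
    assert!(handle(ignored).is_ok());
}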
One + // fix would be to check the equality of the parent causality region. + causality_region: _, + } = self; + + // See also: data-source-is-duplicate-of + manifest_idx == &b.manifest_idx && source == &b.source && context == &b.context + } } #[derive(Clone, Debug, Eq, PartialEq)] pub enum Source { - Ipfs(Cid), + Ipfs(CidFile), } #[derive(Clone, Debug)] pub struct Mapping { pub language: String, pub api_version: semver::Version, - pub entities: Vec, + pub entities: Vec, pub handler: String, pub runtime: Arc>, pub link: Link, @@ -152,23 +271,18 @@ pub struct UnresolvedMapping { pub language: String, pub file: Link, pub handler: String, - pub entities: Vec, + pub entities: Vec, } impl UnresolvedDataSource { - #[allow(unreachable_code)] - #[allow(unused_variables)] - pub async fn resolve( + #[allow(dead_code)] + pub(super) async fn resolve( self, resolver: &Arc, logger: &Logger, manifest_idx: u32, + causality_region: CausalityRegion, ) -> Result { - anyhow::bail!( - "static file data sources are not yet supported, \\ - for details see https://github.com/graphprotocol/graph-node/issues/3864" - ); - info!(logger, "Resolve offchain data source"; "name" => &self.name, "kind" => &self.kind, @@ -188,9 +302,11 @@ impl UnresolvedDataSource { kind: self.kind, name: self.name, source, - mapping: self.mapping.resolve(&*resolver, logger).await?, + mapping: self.mapping.resolve(resolver, logger).await?, context: Arc::new(None), creation_block: None, + done_at: Arc::new(AtomicI32::new(NOT_DONE_VALUE)), + causality_region, }) } } diff --git a/graph/src/data_source/tests.rs b/graph/src/data_source/tests.rs new file mode 100644 index 00000000000..30421fca84f --- /dev/null +++ b/graph/src/data_source/tests.rs @@ -0,0 +1,91 @@ +use cid::Cid; + +use crate::{ + blockchain::mock::{MockBlockchain, MockDataSource}, + components::subgraph::Entity, + ipfs_client::CidFile, + prelude::Link, +}; + +use super::{ + offchain::{Mapping, Source}, + *, +}; + +#[test] +fn offchain_duplicate() { + let a = new_datasource(); + let mut b = a.clone(); + + // Equal data sources are duplicates. + assert!(a.is_duplicate_of(&b)); + + // The causality region, the creation block and the done status are ignored in the duplicate check. + b.causality_region = a.causality_region.next(); + b.creation_block = Some(1); + b.set_done_at(Some(1)); + assert!(a.is_duplicate_of(&b)); + + // The manifest idx, the source and the context are relevant for duplicate detection. 
+ let mut c = a.clone(); + c.manifest_idx = 1; + assert!(!a.is_duplicate_of(&c)); + + let mut c = a.clone(); + c.source = Source::Ipfs(CidFile { + cid: Cid::default(), + path: Some("/foo".into()), + }); + assert!(!a.is_duplicate_of(&c)); + + let mut c = a.clone(); + c.context = Arc::new(Some(Entity::new())); + assert!(!a.is_duplicate_of(&c)); +} + +#[test] +#[should_panic] +fn offchain_mark_processed_error() { + let x = new_datasource(); + x.mark_processed_at(-1) +} + +#[test] +fn data_source_helpers() { + let offchain = new_datasource(); + let offchain_ds = DataSource::::Offchain(offchain.clone()); + assert!(offchain_ds.causality_region() == offchain.causality_region); + assert!(offchain_ds + .as_offchain() + .unwrap() + .is_duplicate_of(&offchain)); + + let onchain = DataSource::::Onchain(MockDataSource); + assert!(onchain.causality_region() == CausalityRegion::ONCHAIN); + assert!(onchain.as_offchain().is_none()); +} + +fn new_datasource() -> offchain::DataSource { + offchain::DataSource::new( + "theKind".into(), + "theName".into(), + 0, + Source::Ipfs(CidFile { + cid: Cid::default(), + path: None, + }), + Mapping { + language: String::new(), + api_version: Version::new(0, 0, 0), + entities: vec![], + handler: String::new(), + runtime: Arc::new(vec![]), + link: Link { + link: String::new(), + }, + }, + Arc::new(None), + Some(0), + CausalityRegion::ONCHAIN.next(), + ) +} diff --git a/graph/src/env/graphql.rs b/graph/src/env/graphql.rs index 6c06b53dbc1..725d012b0ee 100644 --- a/graph/src/env/graphql.rs +++ b/graph/src/env/graphql.rs @@ -84,6 +84,17 @@ pub struct EnvVarsGraphQl { /// Set by the flag `GRAPH_GRAPHQL_MAX_OPERATIONS_PER_CONNECTION`. /// Defaults to 1000. pub max_operations_per_connection: usize, + /// Set by the flag `GRAPH_GRAPHQL_DISABLE_BOOL_FILTERS`. Off by default. + /// Disables AND/OR filters + pub disable_bool_filters: bool, + /// Set by the flag `GRAPH_GRAPHQL_DISABLE_CHILD_SORTING`. Off by default. + /// Disables child-based sorting + pub disable_child_sorting: bool, + /// Set by `GRAPH_GRAPHQL_TRACE_TOKEN`, the token to use to enable query + /// tracing for a GraphQL request. If this is set, requests that have a + /// header `X-GraphTraceQuery` set to this value will include a trace of + /// the SQL queries that were run. 
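// Hedged sketch of the gating described in the `query_trace_token` docs
// above; the exact server-side check is not part of this diff. Tracing
// turns on only when the configured token is non-empty and the request's
// `X-GraphTraceQuery` header matches it.
fn do_trace(query_trace_token: &str, header: Option<&str>) -> bool {
    !query_trace_token.is_empty() && header == Some(query_trace_token)
}

fn main() {
    assert!(do_trace("s3cret", Some("s3cret")));
    assert!(!do_trace("s3cret", Some("wrong")));
    assert!(!do_trace("s3cret", None));
    // An empty token (the default) disables header-based tracing entirely.
    assert!(!do_trace("", Some("")));
}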
+ pub query_trace_token: String, } // This does not print any values avoid accidentally leaking any sensitive env vars @@ -128,6 +139,9 @@ impl From for EnvVarsGraphQl { warn_result_size: x.warn_result_size.0 .0, error_result_size: x.error_result_size.0 .0, max_operations_per_connection: x.max_operations_per_connection, + disable_bool_filters: x.disable_bool_filters.0, + disable_child_sorting: x.disable_child_sorting.0, + query_trace_token: x.query_trace_token, } } } @@ -173,4 +187,10 @@ pub struct InnerGraphQl { error_result_size: WithDefaultUsize, { usize::MAX }>, #[envconfig(from = "GRAPH_GRAPHQL_MAX_OPERATIONS_PER_CONNECTION", default = "1000")] max_operations_per_connection: usize, + #[envconfig(from = "GRAPH_GRAPHQL_DISABLE_BOOL_FILTERS", default = "false")] + pub disable_bool_filters: EnvVarBoolean, + #[envconfig(from = "GRAPH_GRAPHQL_DISABLE_CHILD_SORTING", default = "false")] + pub disable_child_sorting: EnvVarBoolean, + #[envconfig(from = "GRAPH_GRAPHQL_TRACE_TOKEN", default = "")] + query_trace_token: String, } diff --git a/graph/src/env/mappings.rs b/graph/src/env/mappings.rs index 82507da569b..bb3ee2c1d30 100644 --- a/graph/src/env/mappings.rs +++ b/graph/src/env/mappings.rs @@ -36,7 +36,7 @@ pub struct EnvVarsMapping { /// The timeout for all IPFS requests. /// /// Set by the environment variable `GRAPH_IPFS_TIMEOUT` (expressed in - /// seconds). The default value is 30s. + /// seconds). The default value is 60s. pub ipfs_timeout: Duration, /// Sets the `ipfs.map` file size limit. /// @@ -48,7 +48,12 @@ pub struct EnvVarsMapping { /// Set by the environment variable `GRAPH_MAX_IPFS_FILE_BYTES` (expressed in /// bytes). Defaults to 256 MiB. pub max_ipfs_file_bytes: usize, - pub max_ipfs_concurrent_requests: u16, + + /// Limits both concurrent and per second requests to IPFS for file data sources. + /// + /// Set by the environment variable `GRAPH_IPFS_REQUEST_LIMIT`. Defaults to 100. + pub ipfs_request_limit: u16, + /// Set by the flag `GRAPH_ALLOW_NON_DETERMINISTIC_IPFS`. Off by /// default. 
pub allow_non_deterministic_ipfs: bool, @@ -76,7 +81,7 @@ impl From for EnvVarsMapping { ipfs_timeout: Duration::from_secs(x.ipfs_timeout_in_secs), max_ipfs_map_file_size: x.max_ipfs_map_file_size.0, max_ipfs_file_bytes: x.max_ipfs_file_bytes.0, - max_ipfs_concurrent_requests: x.max_ipfs_concurrent_requests, + ipfs_request_limit: x.ipfs_request_limit, allow_non_deterministic_ipfs: x.allow_non_deterministic_ipfs.0, } } @@ -100,14 +105,14 @@ pub struct InnerMappingHandlers { max_ipfs_cache_file_size: WithDefaultUsize, #[envconfig(from = "GRAPH_MAX_IPFS_CACHE_SIZE", default = "50")] max_ipfs_cache_size: u64, - #[envconfig(from = "GRAPH_IPFS_TIMEOUT", default = "30")] + #[envconfig(from = "GRAPH_IPFS_TIMEOUT", default = "60")] ipfs_timeout_in_secs: u64, #[envconfig(from = "GRAPH_MAX_IPFS_MAP_FILE_SIZE", default = "")] max_ipfs_map_file_size: WithDefaultUsize, #[envconfig(from = "GRAPH_MAX_IPFS_FILE_BYTES", default = "")] max_ipfs_file_bytes: WithDefaultUsize, - #[envconfig(from = "GRAPH_MAX_IPFS_CONCURRENT_REQUESTS", default = "100")] - max_ipfs_concurrent_requests: u16, + #[envconfig(from = "GRAPH_IPFS_REQUEST_LIMIT", default = "100")] + ipfs_request_limit: u16, #[envconfig(from = "GRAPH_ALLOW_NON_DETERMINISTIC_IPFS", default = "false")] allow_non_deterministic_ipfs: EnvVarBoolean, } diff --git a/graph/src/env/mod.rs b/graph/src/env/mod.rs index b7e1fc71071..78523018a99 100644 --- a/graph/src/env/mod.rs +++ b/graph/src/env/mod.rs @@ -165,9 +165,8 @@ pub struct EnvVars { /// Set by the environment variable `GRAPH_POI_ACCESS_TOKEN`. No default /// value is provided. pub poi_access_token: Option, - /// Set by the environment variable `GRAPH_SUBGRAPH_MAX_DATA_SOURCES`. No - /// default value is provided. - pub subgraph_max_data_sources: Option, + /// Set by the environment variable `GRAPH_SUBGRAPH_MAX_DATA_SOURCES`. Defaults to 1 billion. + pub subgraph_max_data_sources: usize, /// Keep deterministic errors non-fatal even if the subgraph is pending. /// Used for testing Graph Node itself. /// @@ -203,6 +202,9 @@ pub struct EnvVars { /// Set by the environment variable `EXTERNAL_WS_BASE_URL`. No default /// value is provided. pub external_ws_base_url: Option, + /// Maximum number of Dynamic Data Sources after which a Subgraph will + /// switch to using static filter. 
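// The default above is spelled "1_000_000_000" and parsed through a
// `NoUnderscores<usize>` wrapper. A plausible sketch of such a parser (an
// assumption based on the name; the real helper lives elsewhere in the env
// module): strip underscores, then parse the digits.
use std::num::ParseIntError;

fn parse_no_underscores(s: &str) -> Result<usize, ParseIntError> {
    s.replace('_', "").parse::<usize>()
}

fn main() {
    assert_eq!(parse_no_underscores("1_000_000_000").unwrap(), 1_000_000_000);
    assert_eq!(parse_no_underscores("42").unwrap(), 42);
}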
+ pub static_filters_threshold: usize, } impl EnvVars { @@ -248,7 +250,7 @@ impl EnvVars { subgraph_version_switching_mode: inner.subgraph_version_switching_mode, kill_if_unresponsive: inner.kill_if_unresponsive.0, poi_access_token: inner.poi_access_token, - subgraph_max_data_sources: inner.subgraph_max_data_sources, + subgraph_max_data_sources: inner.subgraph_max_data_sources.0, disable_fail_fast: inner.disable_fail_fast.0, subgraph_error_retry_ceil: Duration::from_secs(inner.subgraph_error_retry_ceil_in_secs), enable_select_by_specific_attributes: inner.enable_select_by_specific_attributes.0, @@ -258,6 +260,7 @@ impl EnvVars { explorer_query_threshold: Duration::from_millis(inner.explorer_query_threshold_in_msec), external_http_base_url: inner.external_http_base_url, external_ws_base_url: inner.external_ws_base_url, + static_filters_threshold: inner.static_filters_threshold, }) } @@ -351,8 +354,8 @@ struct Inner { kill_if_unresponsive: EnvVarBoolean, #[envconfig(from = "GRAPH_POI_ACCESS_TOKEN")] poi_access_token: Option, - #[envconfig(from = "GRAPH_SUBGRAPH_MAX_DATA_SOURCES")] - subgraph_max_data_sources: Option, + #[envconfig(from = "GRAPH_SUBGRAPH_MAX_DATA_SOURCES", default = "1_000_000_000")] + subgraph_max_data_sources: NoUnderscores, #[envconfig(from = "GRAPH_DISABLE_FAIL_FAST", default = "false")] disable_fail_fast: EnvVarBoolean, #[envconfig(from = "GRAPH_SUBGRAPH_ERROR_RETRY_CEIL_SECS", default = "1800")] @@ -371,6 +374,9 @@ struct Inner { external_http_base_url: Option, #[envconfig(from = "EXTERNAL_WS_BASE_URL")] external_ws_base_url: Option, + // Setting this to be unrealistically high so it doesn't get triggered. + #[envconfig(from = "GRAPH_STATIC_FILTERS_THRESHOLD", default = "100000000")] + static_filters_threshold: usize, } #[derive(Clone, Debug)] diff --git a/graph/src/env/store.rs b/graph/src/env/store.rs index 077088b5b39..1132333c0dd 100644 --- a/graph/src/env/store.rs +++ b/graph/src/env/store.rs @@ -66,6 +66,9 @@ pub struct EnvVarsStore { /// Set by the environment variable `GRAPH_REMOVE_UNUSED_INTERVAL` /// (expressed in minutes). The default value is 360 minutes. pub remove_unused_interval: chrono::Duration, + /// Set by the environment variable + /// `GRAPH_STORE_RECENT_BLOCKS_CACHE_CAPACITY`. The default value is 10 blocks. + pub recent_blocks_cache_capacity: usize, // These should really be set through the configuration file, especially for // `GRAPH_STORE_CONNECTION_MIN_IDLE` and @@ -87,10 +90,10 @@ pub struct EnvVarsStore { /// done synchronously. pub write_queue_size: usize, - /// This is just in case new behavior causes issues. This can be removed - /// once the new behavior has run in the hosted service for a few days - /// without issues. - pub disable_error_for_toplevel_parents: bool, + /// How long batch operations during copying or grafting should take. + /// Set by `GRAPH_STORE_BATCH_TARGET_DURATION` (expressed in seconds). + /// The default is 180s. 
+ pub batch_target_duration: Duration, } // This does not print any values avoid accidentally leaking any sensitive env vars @@ -123,11 +126,12 @@ impl From for EnvVarsStore { remove_unused_interval: chrono::Duration::minutes( x.remove_unused_interval_in_minutes as i64, ), + recent_blocks_cache_capacity: x.recent_blocks_cache_capacity, connection_timeout: Duration::from_millis(x.connection_timeout_in_millis), connection_min_idle: x.connection_min_idle, connection_idle_timeout: Duration::from_secs(x.connection_idle_timeout_in_secs), write_queue_size: x.write_queue_size, - disable_error_for_toplevel_parents: x.disable_error_for_toplevel_parents.0, + batch_target_duration: Duration::from_secs(x.batch_target_duration_in_secs), } } } @@ -158,6 +162,8 @@ pub struct InnerStore { connection_try_always: EnvVarBoolean, #[envconfig(from = "GRAPH_REMOVE_UNUSED_INTERVAL", default = "360")] remove_unused_interval_in_minutes: u64, + #[envconfig(from = "GRAPH_STORE_RECENT_BLOCKS_CACHE_CAPACITY", default = "10")] + recent_blocks_cache_capacity: usize, // These should really be set through the configuration file, especially for // `GRAPH_STORE_CONNECTION_MIN_IDLE` and @@ -171,6 +177,6 @@ pub struct InnerStore { connection_idle_timeout_in_secs: u64, #[envconfig(from = "GRAPH_STORE_WRITE_QUEUE", default = "5")] write_queue_size: usize, - #[envconfig(from = "GRAPH_DISABLE_ERROR_FOR_TOPLEVEL_PARENTS", default = "false")] - disable_error_for_toplevel_parents: EnvVarBoolean, + #[envconfig(from = "GRAPH_STORE_BATCH_TARGET_DURATION", default = "180")] + batch_target_duration_in_secs: u64, } diff --git a/graph/src/firehose/codec.rs b/graph/src/firehose/codec.rs index c3b81ce5bb3..5537dba153b 100644 --- a/graph/src/firehose/codec.rs +++ b/graph/src/firehose/codec.rs @@ -1,5 +1,5 @@ #[rustfmt::skip] -#[path = "sf.firehose.v1.rs"] +#[path = "sf.firehose.v2.rs"] mod pbfirehose; #[rustfmt::skip] diff --git a/graph/src/firehose/endpoints.rs b/graph/src/firehose/endpoints.rs index 344506a9a00..2582df5c119 100644 --- a/graph/src/firehose/endpoints.rs +++ b/graph/src/firehose/endpoints.rs @@ -1,18 +1,21 @@ use crate::{ + blockchain::block_stream::FirehoseCursor, blockchain::Block as BlockchainBlock, blockchain::BlockPtr, cheap_clone::CheapClone, components::store::BlockNumber, - firehose::{decode_firehose_block, ForkStep}, - prelude::{debug, info}, + firehose::decode_firehose_block, + prelude::{anyhow, debug, info}, substreams, }; + +use anyhow::bail; use futures03::StreamExt; use http::uri::{Scheme, Uri}; -use rand::prelude::IteratorRandom; use slog::Logger; -use std::{collections::BTreeMap, fmt::Display, iter, sync::Arc, time::Duration}; +use std::{collections::BTreeMap, fmt::Display, sync::Arc, time::Duration}; use tonic::{ + codegen::CompressionEncoding, metadata::MetadataValue, transport::{Channel, ClientTlsConfig}, Request, @@ -20,15 +23,28 @@ use tonic::{ use super::codec as firehose; +/// This is constant because we found this magic number of connections after +/// which the grpc connections start to hang. 
+/// For more details see: https://github.com/graphprotocol/graph-node/issues/3879 +pub const SUBGRAPHS_PER_CONN: usize = 100; + #[derive(Clone, Debug)] pub struct FirehoseEndpoint { pub provider: String, pub token: Option, pub filters_enabled: bool, pub compression_enabled: bool, + pub subgraph_limit: usize, channel: Channel, } +#[derive(Clone, Debug)] +pub enum SubgraphLimit { + Unlimited, + Limit(usize), + NoTraffic, +} + impl Display for FirehoseEndpoint { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { Display::fmt(self.provider.as_str(), f) @@ -42,7 +58,7 @@ impl FirehoseEndpoint { token: Option, filters_enabled: bool, compression_enabled: bool, - conn_pool_size: u16, + subgraph_limit: SubgraphLimit, ) -> Self { let uri = url .as_ref() @@ -75,15 +91,80 @@ impl FirehoseEndpoint { // Timeout on each request, so the timeout to estabilish each 'Blocks' stream. .timeout(Duration::from_secs(120)); - // Load balancing on a same endpoint is useful because it creates a connection pool. - let channel = Channel::balance_list(iter::repeat(endpoint).take(conn_pool_size as usize)); + let subgraph_limit = match subgraph_limit { + // See the comment on the constant + SubgraphLimit::Unlimited => SUBGRAPHS_PER_CONN, + // This is checked when parsing from config but doesn't hurt to be defensive. + SubgraphLimit::Limit(limit) => limit.min(SUBGRAPHS_PER_CONN), + SubgraphLimit::NoTraffic => 0, + }; FirehoseEndpoint { provider: provider.as_ref().to_string(), - channel, + channel: endpoint.connect_lazy(), token, filters_enabled, compression_enabled, + subgraph_limit, + } + } + + // The SUBGRAPHS_PER_CONN upper bound was already limited so we leave it the same + // we need to use inclusive limits (<=) because there will always be a reference + // inside FirehoseEndpoints that is not used (is always cloned). 
+ pub fn has_subgraph_capacity(self: &Arc) -> bool { + Arc::strong_count(&self) <= self.subgraph_limit + } + + pub async fn get_block( + &self, + cursor: FirehoseCursor, + logger: &Logger, + ) -> Result + where + M: prost::Message + BlockchainBlock + Default + 'static, + { + let token_metadata = match self.token.clone() { + Some(token) => Some(MetadataValue::try_from(token.as_str())?), + None => None, + }; + + let mut client = firehose::fetch_client::FetchClient::with_interceptor( + self.channel.cheap_clone(), + move |mut r: Request<()>| { + if let Some(ref t) = token_metadata { + r.metadata_mut().insert("authorization", t.clone()); + } + + Ok(r) + }, + ) + .accept_compressed(CompressionEncoding::Gzip); + + if self.compression_enabled { + client = client.send_compressed(CompressionEncoding::Gzip); + } + + debug!( + logger, + "Connecting to firehose to retrieve block for cursor {}", cursor + ); + + let req = firehose::SingleBlockRequest { + transforms: [].to_vec(), + reference: Some(firehose::single_block_request::Reference::Cursor( + firehose::single_block_request::Cursor { + cursor: cursor.to_string(), + }, + )), + }; + let resp = client.block(req); + + match resp.await { + Ok(v) => Ok(M::decode( + v.get_ref().block.as_ref().unwrap().value.as_ref(), + )?), + Err(e) => return Err(anyhow::format_err!("firehose error {}", e)), } } @@ -109,7 +190,7 @@ impl FirehoseEndpoint { M: prost::Message + BlockchainBlock + Default + 'static, { let token_metadata = match self.token.clone() { - Some(token) => Some(MetadataValue::from_str(token.as_str())?), + Some(token) => Some(MetadataValue::try_from(token.as_str())?), None => None, }; @@ -123,10 +204,10 @@ impl FirehoseEndpoint { Ok(r) }, ) - .accept_gzip(); + .accept_compressed(CompressionEncoding::Gzip); if self.compression_enabled { - client = client.send_gzip(); + client = client.send_compressed(CompressionEncoding::Gzip); } debug!( @@ -150,7 +231,7 @@ impl FirehoseEndpoint { .blocks(firehose::Request { start_block_num: number as i64, stop_block_num: number as u64, - fork_steps: vec![ForkStep::StepNew as i32, ForkStep::StepUndo as i32], + final_blocks_only: false, ..Default::default() }) .await?; @@ -203,7 +284,7 @@ impl FirehoseEndpoint { request: firehose::Request, ) -> Result, anyhow::Error> { let token_metadata = match self.token.clone() { - Some(token) => Some(MetadataValue::from_str(token.as_str())?), + Some(token) => Some(MetadataValue::try_from(token.as_str())?), None => None, }; @@ -217,9 +298,9 @@ impl FirehoseEndpoint { Ok(r) }, ) - .accept_gzip(); + .accept_compressed(CompressionEncoding::Gzip); if self.compression_enabled { - client = client.send_gzip(); + client = client.send_compressed(CompressionEncoding::Gzip); } let response_stream = client.blocks(request).await?; @@ -233,7 +314,7 @@ impl FirehoseEndpoint { request: substreams::Request, ) -> Result, anyhow::Error> { let token_metadata = match self.token.clone() { - Some(token) => Some(MetadataValue::from_str(token.as_str())?), + Some(token) => Some(MetadataValue::try_from(token.as_str())?), None => None, }; @@ -267,10 +348,21 @@ impl FirehoseEndpoints { self.0.len() } - pub fn random(&self) -> Option<&Arc> { - // Select from the matching adapters randomly - let mut rng = rand::thread_rng(); - self.0.iter().choose(&mut rng) + // selects the FirehoseEndpoint with the least amount of references, which will help with spliting + // the load naively across the entire list. 
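// Self-contained sketch of the selection policy above: pick the endpoint
// `Arc` with the smallest strong count (fewest outstanding users) and refuse
// once even that one is past its subgraph limit. Cloning the returned `Arc`
// is what makes the count reflect the assignment; one reference always lives
// in the list itself, hence the inclusive comparison.
use std::sync::Arc;

struct Endpoint {
    subgraph_limit: usize,
}

fn random(endpoints: &[Arc<Endpoint>]) -> Result<Arc<Endpoint>, String> {
    let endpoint = endpoints
        .iter()
        .min_by_key(|e| Arc::strong_count(e))
        .ok_or("no available firehose endpoints".to_string())?;
    if Arc::strong_count(endpoint) > endpoint.subgraph_limit {
        return Err("all connections saturated".to_string());
    }
    Ok(endpoint.clone())
}

fn main() {
    let endpoints = vec![Arc::new(Endpoint { subgraph_limit: 2 })];
    let a = random(&endpoints).unwrap();
    let b = random(&endpoints).unwrap();
    assert!(random(&endpoints).is_err()); // saturated at the limit
    drop((a, b));
    assert!(random(&endpoints).unwrap().subgraph_limit == 2);
}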
+    pub fn random(&self) -> anyhow::Result<Arc<FirehoseEndpoint>> {
+        let endpoint = self
+            .0
+            .iter()
+            .min_by_key(|x| Arc::strong_count(x))
+            .ok_or(anyhow!("no available firehose endpoints"))?;
+        if !endpoint.has_subgraph_capacity() {
+            bail!("all connections saturated with {} connections, increase the firehose conn_pool_size or limit for the node", SUBGRAPHS_PER_CONN);
+        }
+
+        // Cloning here ensures we have the correct count at any given time; if we returned a
+        // reference, it could be cloned later, which could cause a high number of endpoints to
+        // be given away before they are accounted for.
+        Ok(endpoint.clone())
     }
 
     pub fn remove(&mut self, provider: &str) {
@@ -330,3 +422,97 @@ impl FirehoseNetworks {
             .collect()
     }
 }
+
+#[cfg(test)]
+mod test {
+    use std::{mem, sync::Arc};
+
+    use crate::firehose::SubgraphLimit;
+
+    use super::{FirehoseEndpoint, FirehoseEndpoints, SUBGRAPHS_PER_CONN};
+
+    #[tokio::test]
+    async fn firehose_endpoint_errors() {
+        let endpoint = vec![Arc::new(FirehoseEndpoint::new(
+            String::new(),
+            "http://127.0.0.1".to_string(),
+            None,
+            false,
+            false,
+            SubgraphLimit::Unlimited,
+        ))];
+
+        let mut endpoints = FirehoseEndpoints::from(endpoint);
+
+        let mut keep = vec![];
+        for _i in 0..SUBGRAPHS_PER_CONN {
+            keep.push(endpoints.random().unwrap());
+        }
+
+        let err = endpoints.random().unwrap_err();
+        assert!(err.to_string().contains("conn_pool_size"));
+
+        mem::drop(keep);
+        endpoints.random().unwrap();
+
+        // Fails when empty too
+        endpoints.remove("");
+
+        let err = endpoints.random().unwrap_err();
+        assert!(err.to_string().contains("no available firehose endpoints"));
+    }
+
+    #[tokio::test]
+    async fn firehose_endpoint_with_limit() {
+        let endpoint = vec![Arc::new(FirehoseEndpoint::new(
+            String::new(),
+            "http://127.0.0.1".to_string(),
+            None,
+            false,
+            false,
+            SubgraphLimit::Limit(2),
+        ))];
+
+        let mut endpoints = FirehoseEndpoints::from(endpoint);
+
+        let mut keep = vec![];
+        for _ in 0..2 {
+            keep.push(endpoints.random().unwrap());
+        }
+
+        let err = endpoints.random().unwrap_err();
+        assert!(err.to_string().contains("conn_pool_size"));
+
+        mem::drop(keep);
+        endpoints.random().unwrap();
+
+        // Fails when empty too
+        endpoints.remove("");
+
+        let err = endpoints.random().unwrap_err();
+        assert!(err.to_string().contains("no available firehose endpoints"));
+    }
+
+    #[tokio::test]
+    async fn firehose_endpoint_no_traffic() {
+        let endpoint = vec![Arc::new(FirehoseEndpoint::new(
+            String::new(),
+            "http://127.0.0.1".to_string(),
+            None,
+            false,
+            false,
+            SubgraphLimit::NoTraffic,
+        ))];
+
+        let mut endpoints = FirehoseEndpoints::from(endpoint);
+
+        let err = endpoints.random().unwrap_err();
+        assert!(err.to_string().contains("conn_pool_size"));
+
+        // Fails when empty too
+        endpoints.remove("");
+
+        let err = endpoints.random().unwrap_err();
+        assert!(err.to_string().contains("no available firehose endpoints"));
+    }
+}
diff --git a/graph/src/firehose/sf.cosmos.transform.v1.rs b/graph/src/firehose/sf.cosmos.transform.v1.rs
index f93fef1810f..2a8f1251991 100644
--- a/graph/src/firehose/sf.cosmos.transform.v1.rs
+++ b/graph/src/firehose/sf.cosmos.transform.v1.rs
@@ -1,5 +1,6 @@
+#[allow(clippy::derive_partial_eq_without_eq)]
 #[derive(Clone, PartialEq, ::prost::Message)]
 pub struct EventTypeFilter {
-    #[prost(string, repeated, tag="1")]
+    #[prost(string, repeated, tag = "1")]
     pub event_types: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
 }
diff --git a/graph/src/firehose/sf.ethereum.transform.v1.rs b/graph/src/firehose/sf.ethereum.transform.v1.rs
index 35784c20623..19e07c08537 100644
--- a/graph/src/firehose/sf.ethereum.transform.v1.rs
+++ b/graph/src/firehose/sf.ethereum.transform.v1.rs
@@ -1,15 +1,38 @@
-/// Log and CallTo Filters, applied as 'inclusive OR'
+/// CombinedFilter is a combination of "LogFilters" and "CallToFilters"
+///
+/// It transforms the requested stream in two ways:
+/// 1. STRIPPING
+///    The block data is stripped of all transactions that don't
+///    match any of the filters.
+///
+/// 2. SKIPPING
+///    If a "block index" covers a range containing a
+///    block that does NOT match any of the filters, the block will be
+///    skipped altogether, UNLESS send_all_block_headers is enabled.
+///    In that case, the block is still sent, but without any
+///    transaction traces.
+///
+/// The SKIPPING feature only applies to historical blocks, because
+/// the "block index" is always produced after the merged-blocks files
+/// are produced. Therefore, the "live" blocks are never filtered out.
+///
+#[allow(clippy::derive_partial_eq_without_eq)]
 #[derive(Clone, PartialEq, ::prost::Message)]
 pub struct CombinedFilter {
-    #[prost(message, repeated, tag="1")]
+    #[prost(message, repeated, tag = "1")]
     pub log_filters: ::prost::alloc::vec::Vec<LogFilter>,
-    #[prost(message, repeated, tag="2")]
+    #[prost(message, repeated, tag = "2")]
     pub call_filters: ::prost::alloc::vec::Vec<CallToFilter>,
+    /// Always send all blocks. If they don't match any log_filters or call_filters,
+    /// all the transactions will be filtered out, sending only the header.
+    #[prost(bool, tag = "3")]
+    pub send_all_block_headers: bool,
 }
 /// MultiLogFilter concatenates the results of each LogFilter (inclusive OR)
+#[allow(clippy::derive_partial_eq_without_eq)]
 #[derive(Clone, PartialEq, ::prost::Message)]
 pub struct MultiLogFilter {
-    #[prost(message, repeated, tag="1")]
+    #[prost(message, repeated, tag = "1")]
     pub log_filters: ::prost::alloc::vec::Vec<LogFilter>,
 }
 /// LogFilter will match calls where *BOTH*
@@ -17,18 +40,20 @@ pub struct MultiLogFilter {
 /// * the event signature (topic.0) is one of the provided event_signatures -- OR event_signatures is empty --
 ///
 /// a LogFilter with both empty addresses and event_signatures lists is invalid and will fail.
+#[allow(clippy::derive_partial_eq_without_eq)]
 #[derive(Clone, PartialEq, ::prost::Message)]
 pub struct LogFilter {
-    #[prost(bytes="vec", repeated, tag="1")]
+    #[prost(bytes = "vec", repeated, tag = "1")]
     pub addresses: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec<u8>>,
     /// corresponds to the keccak of the event signature which is stored in topic.0
-    #[prost(bytes="vec", repeated, tag="2")]
+    #[prost(bytes = "vec", repeated, tag = "2")]
     pub event_signatures: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec<u8>>,
 }
 /// MultiCallToFilter concatenates the results of each CallToFilter (inclusive OR)
+#[allow(clippy::derive_partial_eq_without_eq)]
 #[derive(Clone, PartialEq, ::prost::Message)]
 pub struct MultiCallToFilter {
-    #[prost(message, repeated, tag="1")]
+    #[prost(message, repeated, tag = "1")]
     pub call_filters: ::prost::alloc::vec::Vec<CallToFilter>,
 }
 /// CallToFilter will match calls where *BOTH*
@@ -36,13 +61,35 @@ pub struct MultiCallToFilter {
 /// * the method signature (in 4-bytes format) is one of the provided signatures -- OR signatures is empty --
 ///
 /// a CallToFilter with both empty addresses and signatures lists is invalid and will fail.
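+///
+/// For illustration, a filter that would match any `transfer(address,uint256)`
+/// call (4-byte selector `0xa9059cbb`), regardless of the target address:
+///
+/// ```ignore
+/// CallToFilter {
+///     addresses: vec![],                              // empty: match any address
+///     signatures: vec![vec![0xa9, 0x05, 0x9c, 0xbb]], // method selector to match
+/// }
+/// ```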
+#[allow(clippy::derive_partial_eq_without_eq)]
 #[derive(Clone, PartialEq, ::prost::Message)]
 pub struct CallToFilter {
-    #[prost(bytes="vec", repeated, tag="1")]
+    #[prost(bytes = "vec", repeated, tag = "1")]
     pub addresses: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec<u8>>,
-    #[prost(bytes="vec", repeated, tag="2")]
+    #[prost(bytes = "vec", repeated, tag = "2")]
     pub signatures: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec<u8>>,
 }
+/// Deprecated: LightBlock is deprecated, replaced by HeaderOnly. Note, however, that the new transform
+/// does not return any transaction traces, so it is not a direct replacement.
+#[allow(clippy::derive_partial_eq_without_eq)]
 #[derive(Clone, PartialEq, ::prost::Message)]
-pub struct LightBlock {
-}
+pub struct LightBlock {}
+/// HeaderOnly returns only the block's header and a few top-level core fields for the block. Useful
+/// for cases where no transaction information is required at all.
+///
+/// The structure that you will have access to afterwards:
+///
+/// ```ignore
+/// Block {
+///     int32 ver = 1;
+///     bytes hash = 2;
+///     uint64 number = 3;
+///     uint64 size = 4;
+///     BlockHeader header = 5;
+/// }
+/// ```
+///
+/// Everything else will be empty.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct HeaderOnly {}
diff --git a/graph/src/firehose/sf.firehose.v1.rs b/graph/src/firehose/sf.firehose.v1.rs
deleted file mode 100644
index d57c793a772..00000000000
--- a/graph/src/firehose/sf.firehose.v1.rs
+++ /dev/null
@@ -1,332 +0,0 @@
-/// For historical segments, forks are not passed
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct Request {
-    /// Controls where the stream of blocks will start.
-    ///
-    /// The stream will start **inclusively** at the requested block num.
-    ///
-    /// When not provided, starts at first streamable block of the chain. Not all
-    /// chain starts at the same block number, so you might get an higher block than
-    /// requested when using default value of 0.
-    ///
-    /// Can be negative, will be resolved relative to the chain head block, assuming
-    /// a chain at head block #100, then using `-50` as the value will start at block
-    /// #50. If it resolves before first streamable block of chain, we assume start
-    /// of chain.
-    ///
-    /// If `start_cursor` is passed, this value is ignored and the stream instead starts
-    /// immediately after the Block pointed by the opaque `start_cursor` value.
-    #[prost(int64, tag="1")]
-    pub start_block_num: i64,
-    /// Controls where the stream of blocks will start which will be immediately after
-    /// the Block pointed by this opaque cursor.
-    ///
-    /// Obtain this value from a previously received from `Response.cursor`.
-    ///
-    /// This value takes precedence over `start_block_num`.
-    #[prost(string, tag="13")]
-    pub start_cursor: ::prost::alloc::string::String,
-    /// When non-zero, controls where the stream of blocks will stop.
-    ///
-    /// The stream will close **after** that block has passed so the boundary is
-    /// **inclusive**.
-    #[prost(uint64, tag="5")]
-    pub stop_block_num: u64,
-    /// Filter the steps you want to see. If not specified, defaults to all steps.
-    ///
-    /// Most common steps will be \[STEP_IRREVERSIBLE\], or [STEP_NEW, STEP_UNDO, STEP_IRREVERSIBLE].
-    #[prost(enumeration="ForkStep", repeated, tag="8")]
-    pub fork_steps: ::prost::alloc::vec::Vec<i32>,
-    /// The CEL filter expression used to include transactions, specific to the target protocol,
-    /// works in combination with `exclude_filter_expr` value.
- #[prost(string, tag="10")] - pub include_filter_expr: ::prost::alloc::string::String, - /// The CEL filter expression used to exclude transactions, specific to the target protocol, works - /// in combination with `include_filter_expr` value. - #[prost(string, tag="11")] - pub exclude_filter_expr: ::prost::alloc::string::String, - ///- EOS "handoffs:3" - ///- EOS "lib" - ///- EOS "confirms:3" - ///- ETH "confirms:200" - ///- ETH "confirms:7" - ///- SOL "commmitement:finalized" - ///- SOL "confirms:200" - #[prost(string, tag="17")] - pub irreversibility_condition: ::prost::alloc::string::String, - #[prost(message, repeated, tag="18")] - pub transforms: ::prost::alloc::vec::Vec<::prost_types::Any>, -} -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct Response { - /// Chain specific block payload, one of: - /// - sf.eosio.codec.v1.Block - /// - sf.ethereum.codec.v1.Block - /// - sf.near.codec.v1.Block - /// - sf.solana.codec.v1.Block - #[prost(message, optional, tag="1")] - pub block: ::core::option::Option<::prost_types::Any>, - #[prost(enumeration="ForkStep", tag="6")] - pub step: i32, - #[prost(string, tag="10")] - pub cursor: ::prost::alloc::string::String, -} -#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] -#[repr(i32)] -pub enum ForkStep { - StepUnknown = 0, - /// Block is new head block of the chain, that is linear with the previous block - StepNew = 1, - /// Block is now forked and should be undone, it's not the head block of the chain anymore - StepUndo = 2, - /// Block is now irreversible and can be committed to (finality is chain specific, see chain documentation for more details) - StepIrreversible = 4, -} -/// TODO: move to ethereum specific transforms -#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] -#[repr(i32)] -pub enum BlockDetails { - Full = 0, - Light = 1, -} -/// Generated client implementations. -pub mod stream_client { - #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] - use tonic::codegen::*; - #[derive(Debug, Clone)] - pub struct StreamClient { - inner: tonic::client::Grpc, - } - impl StreamClient { - /// Attempt to create a new client by connecting to a given endpoint. - pub async fn connect(dst: D) -> Result - where - D: std::convert::TryInto, - D::Error: Into, - { - let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; - Ok(Self::new(conn)) - } - } - impl StreamClient - where - T: tonic::client::GrpcService, - T::Error: Into, - T::ResponseBody: Body + Send + 'static, - ::Error: Into + Send, - { - pub fn new(inner: T) -> Self { - let inner = tonic::client::Grpc::new(inner); - Self { inner } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> StreamClient> - where - F: tonic::service::Interceptor, - T::ResponseBody: Default, - T: tonic::codegen::Service< - http::Request, - Response = http::Response< - >::ResponseBody, - >, - >, - , - >>::Error: Into + Send + Sync, - { - StreamClient::new(InterceptedService::new(inner, interceptor)) - } - /// Compress requests with `gzip`. - /// - /// This requires the server to support it otherwise it might respond with an - /// error. - #[must_use] - pub fn send_gzip(mut self) -> Self { - self.inner = self.inner.send_gzip(); - self - } - /// Enable decompressing responses with `gzip`. 
- #[must_use] - pub fn accept_gzip(mut self) -> Self { - self.inner = self.inner.accept_gzip(); - self - } - pub async fn blocks( - &mut self, - request: impl tonic::IntoRequest, - ) -> Result< - tonic::Response>, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/sf.firehose.v1.Stream/Blocks", - ); - self.inner.server_streaming(request.into_request(), path, codec).await - } - } -} -/// Generated server implementations. -pub mod stream_server { - #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] - use tonic::codegen::*; - ///Generated trait containing gRPC methods that should be implemented for use with StreamServer. - #[async_trait] - pub trait Stream: Send + Sync + 'static { - ///Server streaming response type for the Blocks method. - type BlocksStream: futures_core::Stream< - Item = Result, - > - + Send - + 'static; - async fn blocks( - &self, - request: tonic::Request, - ) -> Result, tonic::Status>; - } - #[derive(Debug)] - pub struct StreamServer { - inner: _Inner, - accept_compression_encodings: EnabledCompressionEncodings, - send_compression_encodings: EnabledCompressionEncodings, - } - struct _Inner(Arc); - impl StreamServer { - pub fn new(inner: T) -> Self { - Self::from_arc(Arc::new(inner)) - } - pub fn from_arc(inner: Arc) -> Self { - let inner = _Inner(inner); - Self { - inner, - accept_compression_encodings: Default::default(), - send_compression_encodings: Default::default(), - } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> InterceptedService - where - F: tonic::service::Interceptor, - { - InterceptedService::new(Self::new(inner), interceptor) - } - /// Enable decompressing requests with `gzip`. - #[must_use] - pub fn accept_gzip(mut self) -> Self { - self.accept_compression_encodings.enable_gzip(); - self - } - /// Compress responses with `gzip`, if the client supports it. 
- #[must_use] - pub fn send_gzip(mut self) -> Self { - self.send_compression_encodings.enable_gzip(); - self - } - } - impl tonic::codegen::Service> for StreamServer - where - T: Stream, - B: Body + Send + 'static, - B::Error: Into + Send + 'static, - { - type Response = http::Response; - type Error = std::convert::Infallible; - type Future = BoxFuture; - fn poll_ready( - &mut self, - _cx: &mut Context<'_>, - ) -> Poll> { - Poll::Ready(Ok(())) - } - fn call(&mut self, req: http::Request) -> Self::Future { - let inner = self.inner.clone(); - match req.uri().path() { - "/sf.firehose.v1.Stream/Blocks" => { - #[allow(non_camel_case_types)] - struct BlocksSvc(pub Arc); - impl tonic::server::ServerStreamingService - for BlocksSvc { - type Response = super::Response; - type ResponseStream = T::BlocksStream; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = self.0.clone(); - let fut = async move { (*inner).blocks(request).await }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let inner = self.inner.clone(); - let fut = async move { - let inner = inner.0; - let method = BlocksSvc(inner); - let codec = tonic::codec::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ); - let res = grpc.server_streaming(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - _ => { - Box::pin(async move { - Ok( - http::Response::builder() - .status(200) - .header("grpc-status", "12") - .header("content-type", "application/grpc") - .body(empty_body()) - .unwrap(), - ) - }) - } - } - } - } - impl Clone for StreamServer { - fn clone(&self) -> Self { - let inner = self.inner.clone(); - Self { - inner, - accept_compression_encodings: self.accept_compression_encodings, - send_compression_encodings: self.send_compression_encodings, - } - } - } - impl Clone for _Inner { - fn clone(&self) -> Self { - Self(self.0.clone()) - } - } - impl std::fmt::Debug for _Inner { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{:?}", self.0) - } - } - impl tonic::transport::NamedService for StreamServer { - const NAME: &'static str = "sf.firehose.v1.Stream"; - } -} diff --git a/graph/src/firehose/sf.firehose.v2.rs b/graph/src/firehose/sf.firehose.v2.rs new file mode 100644 index 00000000000..6a5b9d35204 --- /dev/null +++ b/graph/src/firehose/sf.firehose.v2.rs @@ -0,0 +1,623 @@ +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SingleBlockRequest { + #[prost(message, repeated, tag = "6")] + pub transforms: ::prost::alloc::vec::Vec<::prost_types::Any>, + #[prost(oneof = "single_block_request::Reference", tags = "3, 4, 5")] + pub reference: ::core::option::Option, +} +/// Nested message and enum types in `SingleBlockRequest`. 
+pub mod single_block_request {
+    /// Get the current known canonical version of a block with this number
+    #[allow(clippy::derive_partial_eq_without_eq)]
+    #[derive(Clone, PartialEq, ::prost::Message)]
+    pub struct BlockNumber {
+        #[prost(uint64, tag = "1")]
+        pub num: u64,
+    }
+    /// Get the current block with a specific hash and number
+    #[allow(clippy::derive_partial_eq_without_eq)]
+    #[derive(Clone, PartialEq, ::prost::Message)]
+    pub struct BlockHashAndNumber {
+        #[prost(uint64, tag = "1")]
+        pub num: u64,
+        #[prost(string, tag = "2")]
+        pub hash: ::prost::alloc::string::String,
+    }
+    /// Get the block that generated a specific cursor
+    #[allow(clippy::derive_partial_eq_without_eq)]
+    #[derive(Clone, PartialEq, ::prost::Message)]
+    pub struct Cursor {
+        #[prost(string, tag = "1")]
+        pub cursor: ::prost::alloc::string::String,
+    }
+    #[allow(clippy::derive_partial_eq_without_eq)]
+    #[derive(Clone, PartialEq, ::prost::Oneof)]
+    pub enum Reference {
+        #[prost(message, tag = "3")]
+        BlockNumber(BlockNumber),
+        #[prost(message, tag = "4")]
+        BlockHashAndNumber(BlockHashAndNumber),
+        #[prost(message, tag = "5")]
+        Cursor(Cursor),
+    }
+}
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct SingleBlockResponse {
+    #[prost(message, optional, tag = "1")]
+    pub block: ::core::option::Option<::prost_types::Any>,
+}
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct Request {
+    /// Controls where the stream of blocks will start.
+    ///
+    /// The stream will start **inclusively** at the requested block num.
+    ///
+    /// When not provided, starts at the first streamable block of the chain. Not all
+    /// chains start at the same block number, so you might get a higher block than
+    /// requested when using the default value of 0.
+    ///
+    /// Can be negative; it will be resolved relative to the chain head block. Assuming
+    /// a chain at head block #100, using `-50` as the value will start at block
+    /// #50. If it resolves to before the first streamable block of the chain, we assume the
+    /// start of the chain.
+    ///
+    /// If `start_cursor` is given, this value is ignored and the stream instead starts
+    /// immediately after the Block pointed to by the opaque `start_cursor` value.
+    #[prost(int64, tag = "1")]
+    pub start_block_num: i64,
+    /// Controls where the stream of blocks will start, which will be immediately after
+    /// the Block pointed to by this opaque cursor.
+    ///
+    /// Obtain this value from a previously received `Response.cursor`.
+    ///
+    /// This value takes precedence over `start_block_num`.
+    #[prost(string, tag = "2")]
+    pub cursor: ::prost::alloc::string::String,
+    /// When non-zero, controls where the stream of blocks will stop.
+    ///
+    /// The stream will close **after** that block has passed, so the boundary is
+    /// **inclusive**.
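+    ///
+    /// For example (illustrative): `start_block_num: 10` together with
+    /// `stop_block_num: 12` streams blocks 10, 11 and 12, then closes.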
+ #[prost(uint64, tag = "3")] + pub stop_block_num: u64, + /// With final_block_only, you only receive blocks with STEP_FINAL + /// Default behavior will send blocks as STEP_NEW, with occasional STEP_UNDO + #[prost(bool, tag = "4")] + pub final_blocks_only: bool, + #[prost(message, repeated, tag = "10")] + pub transforms: ::prost::alloc::vec::Vec<::prost_types::Any>, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Response { + /// Chain specific block payload, ex: + /// - sf.eosio.type.v1.Block + /// - sf.ethereum.type.v1.Block + /// - sf.near.type.v1.Block + #[prost(message, optional, tag = "1")] + pub block: ::core::option::Option<::prost_types::Any>, + #[prost(enumeration = "ForkStep", tag = "6")] + pub step: i32, + #[prost(string, tag = "10")] + pub cursor: ::prost::alloc::string::String, +} +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum ForkStep { + StepUnset = 0, + /// Incoming block + StepNew = 1, + /// A reorg caused this specific block to be excluded from the chain + StepUndo = 2, + /// Block is now final and can be committed (finality is chain specific, + /// see chain documentation for more details) + StepFinal = 3, +} +impl ForkStep { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + ForkStep::StepUnset => "STEP_UNSET", + ForkStep::StepNew => "STEP_NEW", + ForkStep::StepUndo => "STEP_UNDO", + ForkStep::StepFinal => "STEP_FINAL", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "STEP_UNSET" => Some(Self::StepUnset), + "STEP_NEW" => Some(Self::StepNew), + "STEP_UNDO" => Some(Self::StepUndo), + "STEP_FINAL" => Some(Self::StepFinal), + _ => None, + } + } +} +/// Generated client implementations. +pub mod stream_client { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + use tonic::codegen::http::Uri; + #[derive(Debug, Clone)] + pub struct StreamClient { + inner: tonic::client::Grpc, + } + impl StreamClient { + /// Attempt to create a new client by connecting to a given endpoint. + pub async fn connect(dst: D) -> Result + where + D: std::convert::TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl StreamClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + Send + 'static, + ::Error: Into + Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> StreamClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + , + >>::Error: Into + Send + Sync, + { + StreamClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. 
+ /// + /// This requires the server to support it otherwise it might respond with an + /// error. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + pub async fn blocks( + &mut self, + request: impl tonic::IntoRequest, + ) -> Result< + tonic::Response>, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/sf.firehose.v2.Stream/Blocks", + ); + self.inner.server_streaming(request.into_request(), path, codec).await + } + } +} +/// Generated client implementations. +pub mod fetch_client { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + use tonic::codegen::http::Uri; + #[derive(Debug, Clone)] + pub struct FetchClient { + inner: tonic::client::Grpc, + } + impl FetchClient { + /// Attempt to create a new client by connecting to a given endpoint. + pub async fn connect(dst: D) -> Result + where + D: std::convert::TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl FetchClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + Send + 'static, + ::Error: Into + Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> FetchClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + , + >>::Error: Into + Send + Sync, + { + FetchClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + pub async fn block( + &mut self, + request: impl tonic::IntoRequest, + ) -> Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/sf.firehose.v2.Fetch/Block", + ); + self.inner.unary(request.into_request(), path, codec).await + } + } +} +/// Generated server implementations. +pub mod stream_server { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + /// Generated trait containing gRPC methods that should be implemented for use with StreamServer. 
+ #[async_trait] + pub trait Stream: Send + Sync + 'static { + /// Server streaming response type for the Blocks method. + type BlocksStream: futures_core::Stream< + Item = Result, + > + + Send + + 'static; + async fn blocks( + &self, + request: tonic::Request, + ) -> Result, tonic::Status>; + } + #[derive(Debug)] + pub struct StreamServer { + inner: _Inner, + accept_compression_encodings: EnabledCompressionEncodings, + send_compression_encodings: EnabledCompressionEncodings, + } + struct _Inner(Arc); + impl StreamServer { + pub fn new(inner: T) -> Self { + Self::from_arc(Arc::new(inner)) + } + pub fn from_arc(inner: Arc) -> Self { + let inner = _Inner(inner); + Self { + inner, + accept_compression_encodings: Default::default(), + send_compression_encodings: Default::default(), + } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> InterceptedService + where + F: tonic::service::Interceptor, + { + InterceptedService::new(Self::new(inner), interceptor) + } + /// Enable decompressing requests with the given encoding. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.accept_compression_encodings.enable(encoding); + self + } + /// Compress responses with the given encoding, if the client supports it. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.send_compression_encodings.enable(encoding); + self + } + } + impl tonic::codegen::Service> for StreamServer + where + T: Stream, + B: Body + Send + 'static, + B::Error: Into + Send + 'static, + { + type Response = http::Response; + type Error = std::convert::Infallible; + type Future = BoxFuture; + fn poll_ready( + &mut self, + _cx: &mut Context<'_>, + ) -> Poll> { + Poll::Ready(Ok(())) + } + fn call(&mut self, req: http::Request) -> Self::Future { + let inner = self.inner.clone(); + match req.uri().path() { + "/sf.firehose.v2.Stream/Blocks" => { + #[allow(non_camel_case_types)] + struct BlocksSvc(pub Arc); + impl tonic::server::ServerStreamingService + for BlocksSvc { + type Response = super::Response; + type ResponseStream = T::BlocksStream; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = self.0.clone(); + let fut = async move { (*inner).blocks(request).await }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = BlocksSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ); + let res = grpc.server_streaming(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + _ => { + Box::pin(async move { + Ok( + http::Response::builder() + .status(200) + .header("grpc-status", "12") + .header("content-type", "application/grpc") + .body(empty_body()) + .unwrap(), + ) + }) + } + } + } + } + impl Clone for StreamServer { + fn clone(&self) -> Self { + let inner = self.inner.clone(); + Self { + inner, + accept_compression_encodings: self.accept_compression_encodings, + send_compression_encodings: self.send_compression_encodings, + } + } + } + impl Clone for _Inner { + fn clone(&self) -> Self { + Self(self.0.clone()) + } + } + impl std::fmt::Debug for _Inner { + fn fmt(&self, f: &mut 
std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{:?}", self.0) + } + } + impl tonic::server::NamedService for StreamServer { + const NAME: &'static str = "sf.firehose.v2.Stream"; + } +} +/// Generated server implementations. +pub mod fetch_server { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + /// Generated trait containing gRPC methods that should be implemented for use with FetchServer. + #[async_trait] + pub trait Fetch: Send + Sync + 'static { + async fn block( + &self, + request: tonic::Request, + ) -> Result, tonic::Status>; + } + #[derive(Debug)] + pub struct FetchServer { + inner: _Inner, + accept_compression_encodings: EnabledCompressionEncodings, + send_compression_encodings: EnabledCompressionEncodings, + } + struct _Inner(Arc); + impl FetchServer { + pub fn new(inner: T) -> Self { + Self::from_arc(Arc::new(inner)) + } + pub fn from_arc(inner: Arc) -> Self { + let inner = _Inner(inner); + Self { + inner, + accept_compression_encodings: Default::default(), + send_compression_encodings: Default::default(), + } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> InterceptedService + where + F: tonic::service::Interceptor, + { + InterceptedService::new(Self::new(inner), interceptor) + } + /// Enable decompressing requests with the given encoding. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.accept_compression_encodings.enable(encoding); + self + } + /// Compress responses with the given encoding, if the client supports it. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.send_compression_encodings.enable(encoding); + self + } + } + impl tonic::codegen::Service> for FetchServer + where + T: Fetch, + B: Body + Send + 'static, + B::Error: Into + Send + 'static, + { + type Response = http::Response; + type Error = std::convert::Infallible; + type Future = BoxFuture; + fn poll_ready( + &mut self, + _cx: &mut Context<'_>, + ) -> Poll> { + Poll::Ready(Ok(())) + } + fn call(&mut self, req: http::Request) -> Self::Future { + let inner = self.inner.clone(); + match req.uri().path() { + "/sf.firehose.v2.Fetch/Block" => { + #[allow(non_camel_case_types)] + struct BlockSvc(pub Arc); + impl tonic::server::UnaryService + for BlockSvc { + type Response = super::SingleBlockResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = self.0.clone(); + let fut = async move { (*inner).block(request).await }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = BlockSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + _ => { + Box::pin(async move { + Ok( + http::Response::builder() + .status(200) + .header("grpc-status", "12") + .header("content-type", "application/grpc") + .body(empty_body()) + .unwrap(), + ) + }) + } + } + } + } + impl Clone for FetchServer { + fn clone(&self) -> Self { + let inner = self.inner.clone(); + Self { + inner, + accept_compression_encodings: 
self.accept_compression_encodings,
+                send_compression_encodings: self.send_compression_encodings,
+            }
+        }
+    }
+    impl<T: Fetch> Clone for _Inner<T> {
+        fn clone(&self) -> Self {
+            Self(self.0.clone())
+        }
+    }
+    impl<T: std::fmt::Debug> std::fmt::Debug for _Inner<T> {
+        fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+            write!(f, "{:?}", self.0)
+        }
+    }
+    impl<T: Fetch> tonic::server::NamedService for FetchServer<T> {
+        const NAME: &'static str = "sf.firehose.v2.Fetch";
+    }
+}
diff --git a/graph/src/firehose/sf.near.transform.v1.rs b/graph/src/firehose/sf.near.transform.v1.rs
index 86972b6cc54..1b02d2b415e 100644
--- a/graph/src/firehose/sf.near.transform.v1.rs
+++ b/graph/src/firehose/sf.near.transform.v1.rs
@@ -1,8 +1,9 @@
+#[allow(clippy::derive_partial_eq_without_eq)]
 #[derive(Clone, PartialEq, ::prost::Message)]
 pub struct BasicReceiptFilter {
-    #[prost(string, repeated, tag="1")]
+    #[prost(string, repeated, tag = "1")]
     pub accounts: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
-    #[prost(message, repeated, tag="2")]
+    #[prost(message, repeated, tag = "2")]
     pub prefix_and_suffix_pairs: ::prost::alloc::vec::Vec<PrefixSuffixPair>,
 }
 /// PrefixSuffixPair applies a logical AND to prefix and suffix when both fields are non-empty.
@@ -12,10 +13,11 @@ pub struct BasicReceiptFilter {
 ///  * {prefix="",suffix=""} is invalid
 ///
 /// Note that the suffix will usually have a TLD, ex: "mydomain.near" or "mydomain.testnet"
+#[allow(clippy::derive_partial_eq_without_eq)]
 #[derive(Clone, PartialEq, ::prost::Message)]
 pub struct PrefixSuffixPair {
-    #[prost(string, tag="1")]
+    #[prost(string, tag = "1")]
     pub prefix: ::prost::alloc::string::String,
-    #[prost(string, tag="2")]
+    #[prost(string, tag = "2")]
     pub suffix: ::prost::alloc::string::String,
 }
diff --git a/graph/src/ipfs_client.rs b/graph/src/ipfs_client.rs
index c99d83b1cb7..1e5141ce2b9 100644
--- a/graph/src/ipfs_client.rs
+++ b/graph/src/ipfs_client.rs
@@ -1,14 +1,76 @@
 use crate::prelude::CheapClone;
+use anyhow::anyhow;
 use anyhow::Error;
 use bytes::Bytes;
+use cid::Cid;
 use futures03::Stream;
 use http::header::CONTENT_LENGTH;
 use http::Uri;
 use reqwest::multipart;
 use serde::Deserialize;
+use std::fmt::Display;
 use std::time::Duration;
 use std::{str::FromStr, sync::Arc};
 
+/// Represents a file on IPFS. This file can be the CID or a path within a folder CID.
+/// The path cannot have a prefix (i.e. for CID/hello.json, cid is CID and path is "hello.json").
+#[derive(Debug, Clone, Default, Eq, PartialEq, Hash)]
+pub struct CidFile {
+    pub cid: Cid,
+    pub path: Option<String>,
+}
+
+impl Display for CidFile {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        let str = match self.path {
+            Some(ref f) => format!("{}/{}", self.cid, f),
+            None => self.cid.to_string(),
+        };
+        f.write_str(&str)
+    }
+}
+
+impl CidFile {
+    pub fn to_bytes(&self) -> Vec<u8> {
+        self.to_string().as_bytes().to_vec()
+    }
+}
+
+impl TryFrom<crate::data::store::scalar::Bytes> for CidFile {
+    type Error = anyhow::Error;
+
+    fn try_from(value: crate::data::store::scalar::Bytes) -> Result<Self, Self::Error> {
+        let str = String::from_utf8(value.to_vec())?;
+
+        Self::from_str(&str)
+    }
+}
+
+/// The string should not have a prefix, and only one slash after the CID is removed; everything
+/// else is considered a file path. If this is malformed, it will fail to find the file.
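+///
+/// For example (illustrative):
+/// ```ignore
+/// let f = CidFile::from_str("<cid>/path/to/file.json")?;
+/// // f.cid is the parsed Cid and f.path == Some("path/to/file.json".to_string())
+/// ```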
+impl FromStr for CidFile {
+    type Err = anyhow::Error;
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        if s.is_empty() {
+            return Err(anyhow!("cid can't be empty"));
+        }
+
+        let cid_str: String = s.chars().take_while(|c| *c != '/').collect();
+        let cid = Cid::from_str(&cid_str)?;
+
+        // if the cid was the only content or if it's just slash-terminated.
+        if cid_str.len() == s.len() || s.len() == cid_str.len() + 1 {
+            return Ok(CidFile { cid, path: None });
+        }
+
+        let file: String = s[cid_str.len() + 1..].to_string();
+        let path = if file.is_empty() { None } else { Some(file) };
+
+        Ok(CidFile { cid, path })
+    }
+}
+
 #[derive(Clone, Copy, PartialEq, Eq)]
 pub enum StatApi {
     Block,
@@ -167,3 +229,102 @@ impl IpfsClient {
             .and_then(|x| x)
     }
 }
+
+#[cfg(test)]
+mod test {
+    use std::str::FromStr;
+
+    use anyhow::anyhow;
+    use cid::Cid;
+
+    use crate::ipfs_client::CidFile;
+
+    #[test]
+    fn test_cid_parsing() {
+        let cid_str = "bafyreibjo4xmgaevkgud7mbifn3dzp4v4lyaui4yvqp3f2bqwtxcjrdqg4";
+        let cid = Cid::from_str(cid_str).unwrap();
+
+        struct Case<'a> {
+            name: &'a str,
+            input: String,
+            path: String,
+            expected: Result<CidFile, anyhow::Error>,
+        }
+
+        let cases = vec![
+            Case {
+                name: "correct, no slashes, no file",
+                input: cid_str.to_string(),
+                path: cid_str.to_string(),
+                expected: Ok(CidFile { cid, path: None }),
+            },
+            Case {
+                name: "correct, with file path",
+                input: format!("{}/file.json", cid),
+                path: format!("{}/file.json", cid_str),
+                expected: Ok(CidFile {
+                    cid,
+                    path: Some("file.json".into()),
+                }),
+            },
+            Case {
+                name: "correct, cid with trailing slash",
+                input: format!("{}/", cid),
+                path: cid.to_string(),
+                expected: Ok(CidFile { cid, path: None }),
+            },
+            Case {
+                name: "incorrect, empty",
+                input: "".to_string(),
+                path: "".to_string(),
+                expected: Err(anyhow!("cid can't be empty")),
+            },
+            Case {
+                name: "correct, two slashes",
+                input: format!("{}//", cid),
+                path: format!("{}//", cid),
+                expected: Ok(CidFile {
+                    cid,
+                    path: Some("/".into()),
+                }),
+            },
+            Case {
+                name: "incorrect, leading slashes",
+                input: format!("/ipfs/{}/file.json", cid),
+                path: "".to_string(),
+                expected: Err(anyhow!("Input too short")),
+            },
+            Case {
+                name: "correct syntax, invalid CID",
+                input: "notacid/file.json".to_string(),
+                path: "".to_string(),
+                expected: Err(anyhow!("Failed to parse multihash")),
+            },
+        ];
+
+        for case in cases {
+            let f = CidFile::from_str(&case.input);
+
+            match case.expected {
+                Ok(cid_file) => {
+                    assert!(f.is_ok(), "case: {}", case.name);
+                    let f = f.unwrap();
+                    assert_eq!(f, cid_file, "case: {}", case.name);
+                    assert_eq!(f.to_string(), case.path, "case: {}", case.name);
+                }
+                Err(err) => assert_eq!(
+                    f.unwrap_err().to_string(),
+                    err.to_string(),
+                    "case: {}",
+                    case.name
+                ),
+            }
+        }
+    }
+}
diff --git a/graph/src/lib.rs b/graph/src/lib.rs
index 8d8073fc013..eb9b3bbb121 100644
--- a/graph/src/lib.rs
+++ b/graph/src/lib.rs
@@ -75,9 +75,11 @@ pub mod prelude {
     pub use futures03::sink::SinkExt as _;
     pub use futures03::stream::{StreamExt as _, TryStreamExt};
     pub use hex;
+    pub use isatty;
     pub use lazy_static::lazy_static;
     pub use prost;
     pub use rand;
+    pub use regex;
     pub use reqwest;
     pub use serde;
     pub use serde_derive::{Deserialize, Serialize};
@@ -114,18 +116,17 @@ pub mod prelude {
         CounterVec, Gauge, GaugeVec, Histogram, HistogramOpts, HistogramVec, MetricsRegistry,
         Opts, PrometheusError, Registry,
     };
-    pub use crate::components::server::admin::JsonRpcServer;
     pub use crate::components::server::index_node::IndexNodeServer;
-    pub use
crate::components::server::metrics::MetricsServer; pub use crate::components::server::query::GraphQLServer; pub use crate::components::server::subscription::SubscriptionServer; pub use crate::components::store::{ AttributeNames, BlockNumber, CachedEthereumCall, ChainStore, Child, ChildMultiplicity, EntityCache, EntityChange, EntityChangeOperation, EntityCollection, EntityFilter, - EntityLink, EntityModification, EntityOperation, EntityOrder, EntityQuery, EntityRange, - EntityWindow, EthereumCallCache, ParentLink, PartialBlockPtr, PoolWaitStats, QueryStore, - QueryStoreManager, StoreError, StoreEvent, StoreEventStream, StoreEventStreamBox, - SubgraphStore, UnfailOutcome, WindowAttribute, BLOCK_NUMBER_MAX, + EntityLink, EntityModification, EntityOperation, EntityOrder, EntityOrderByChild, + EntityOrderByChildInfo, EntityQuery, EntityRange, EntityWindow, EthereumCallCache, + ParentLink, PartialBlockPtr, PoolWaitStats, QueryStore, QueryStoreManager, StoreError, + StoreEvent, StoreEventStream, StoreEventStreamBox, SubgraphStore, UnfailOutcome, + WindowAttribute, BLOCK_NUMBER_MAX, }; pub use crate::components::subgraph::{ BlockState, DataSourceTemplateInfo, HostMetrics, RuntimeHost, RuntimeHostBuilder, diff --git a/graph/src/log/elastic.rs b/graph/src/log/elastic.rs index 1a6294b0a8a..cbc49810beb 100644 --- a/graph/src/log/elastic.rs +++ b/graph/src/log/elastic.rs @@ -8,6 +8,7 @@ use std::time::Duration; use chrono::prelude::{SecondsFormat, Utc}; use futures03::TryFutureExt; use http::header::CONTENT_TYPE; +use prometheus::Counter; use reqwest; use reqwest::Client; use serde::ser::Serializer as SerdeSerializer; @@ -92,7 +93,8 @@ impl HashMapKVSerializer { impl Serializer for HashMapKVSerializer { fn emit_arguments(&mut self, key: Key, val: &fmt::Arguments) -> slog::Result { - Ok(self.kvs.push((key.into(), format!("{}", val)))) + self.kvs.push((key.into(), format!("{}", val))); + Ok(()) } } @@ -125,7 +127,8 @@ impl SimpleKVSerializer { impl Serializer for SimpleKVSerializer { fn emit_arguments(&mut self, key: Key, val: &fmt::Arguments) -> slog::Result { - Ok(self.kvs.push((key.into(), format!("{}", val)))) + self.kvs.push((key.into(), format!("{}", val))); + Ok(()) } } @@ -173,15 +176,21 @@ pub struct ElasticDrainConfig { pub struct ElasticDrain { config: ElasticDrainConfig, error_logger: Logger, + logs_sent_counter: Counter, logs: Arc>>, } impl ElasticDrain { /// Creates a new `ElasticDrain`. 
- pub fn new(config: ElasticDrainConfig, error_logger: Logger) -> Self { + pub fn new( + config: ElasticDrainConfig, + error_logger: Logger, + logs_sent_counter: Counter, + ) -> Self { let drain = ElasticDrain { config, error_logger, + logs_sent_counter, logs: Arc::new(Mutex::new(vec![])), }; drain.periodically_flush_logs(); @@ -190,6 +199,7 @@ impl ElasticDrain { fn periodically_flush_logs(&self) { let flush_logger = self.error_logger.clone(); + let logs_sent_counter = self.logs_sent_counter.clone(); let logs = self.logs.clone(); let config = self.config.clone(); let mut interval = tokio::time::interval(self.config.flush_interval); @@ -201,7 +211,6 @@ impl ElasticDrain { let logs = logs.clone(); let config = config.clone(); - let flush_logger = flush_logger.clone(); let logs_to_send = { let mut logs = logs.lock().unwrap(); let logs_to_send = (*logs).clone(); @@ -215,11 +224,7 @@ impl ElasticDrain { continue; } - debug!( - flush_logger, - "Flushing {} logs to Elasticsearch", - logs_to_send.len() - ); + logs_sent_counter.inc_by(logs_to_send.len() as f64); // The Elasticsearch batch API takes requests with the following format: // ```ignore @@ -380,8 +385,12 @@ impl Drain for ElasticDrain { /// /// Uses `error_logger` to print any Elasticsearch logging errors, /// so they don't go unnoticed. -pub fn elastic_logger(config: ElasticDrainConfig, error_logger: Logger) -> Logger { - let elastic_drain = ElasticDrain::new(config, error_logger).fuse(); +pub fn elastic_logger( + config: ElasticDrainConfig, + error_logger: Logger, + logs_sent_counter: Counter, +) -> Logger { + let elastic_drain = ElasticDrain::new(config, error_logger, logs_sent_counter).fuse(); let async_drain = slog_async::Async::new(elastic_drain) .chan_size(20000) .build() diff --git a/graph/src/log/factory.rs b/graph/src/log/factory.rs index 8565c5624ff..1b126a6995d 100644 --- a/graph/src/log/factory.rs +++ b/graph/src/log/factory.rs @@ -1,5 +1,9 @@ +use std::sync::Arc; + +use prometheus::Counter; use slog::*; +use crate::components::metrics::MetricsRegistry; use crate::components::store::DeploymentLocator; use crate::log::elastic::*; use crate::log::split::*; @@ -20,14 +24,20 @@ pub struct ComponentLoggerConfig { pub struct LoggerFactory { parent: Logger, elastic_config: Option, + metrics_registry: Arc, } impl LoggerFactory { /// Creates a new factory using a parent logger and optional Elasticsearch configuration. 
- pub fn new(logger: Logger, elastic_config: Option) -> Self { + pub fn new( + logger: Logger, + elastic_config: Option, + metrics_registry: Arc, + ) -> Self { Self { parent: logger, elastic_config, + metrics_registry, } } @@ -36,6 +46,7 @@ impl LoggerFactory { Self { parent, elastic_config: self.elastic_config.clone(), + metrics_registry: self.metrics_registry.clone(), } } @@ -68,6 +79,7 @@ impl LoggerFactory { max_retries: ENV_VARS.elastic_search_max_retries, }, term_logger.clone(), + self.logs_sent_counter(None), ), ) }) @@ -98,9 +110,20 @@ impl LoggerFactory { max_retries: ENV_VARS.elastic_search_max_retries, }, term_logger.clone(), + self.logs_sent_counter(Some(loc.hash.as_str())), ), ) }) .unwrap_or(term_logger) } + + fn logs_sent_counter(&self, deployment: Option<&str>) -> Counter { + self.metrics_registry + .global_deployment_counter( + "graph_elasticsearch_logs_sent", + "Count of logs sent to Elasticsearch endpoint", + deployment.unwrap_or(""), + ) + .unwrap() + } } diff --git a/graph/src/log/mod.rs b/graph/src/log/mod.rs index 6b2841332ce..60bbbcd5153 100644 --- a/graph/src/log/mod.rs +++ b/graph/src/log/mod.rs @@ -165,7 +165,7 @@ where } } - write!(decorator, "\n")?; + writeln!(decorator)?; decorator.flush()?; Ok(()) diff --git a/graph/src/runtime/mod.rs b/graph/src/runtime/mod.rs index f86945f4bbf..4b7109c6f8e 100644 --- a/graph/src/runtime/mod.rs +++ b/graph/src/runtime/mod.rs @@ -330,11 +330,12 @@ pub enum IndexForAscTypeId { CosmosValidatorSetUpdates = 1559, CosmosValidatorUpdate = 1560, CosmosVersionParams = 1561, - + CosmosMessageData = 1562, + CosmosTransactionContext = 1563, // Continue to add more Cosmos type IDs here. // e.g.: - // NextCosmosType = 1562, - // AnotherCosmosType = 1563, + // NextCosmosType = 1564, + // AnotherCosmosType = 1565, // ... 
// LastCosmosType = 2499, @@ -426,7 +427,7 @@ impl std::error::Error for DeterministicHostError {} #[derive(thiserror::Error, Debug)] pub enum HostExportError { #[error("{0:#}")] - Unknown(anyhow::Error), + Unknown(#[from] anyhow::Error), #[error("{0:#}")] PossibleReorg(anyhow::Error), @@ -435,12 +436,6 @@ pub enum HostExportError { Deterministic(anyhow::Error), } -impl From for HostExportError { - fn from(e: anyhow::Error) -> Self { - HostExportError::Unknown(e) - } -} - impl From for HostExportError { fn from(value: DeterministicHostError) -> Self { match value { diff --git a/graph/src/substreams/sf.substreams.v1.rs b/graph/src/substreams/sf.substreams.v1.rs index 0f81ea7fc97..99ab0c0cf58 100644 --- a/graph/src/substreams/sf.substreams.v1.rs +++ b/graph/src/substreams/sf.substreams.v1.rs @@ -1,174 +1,231 @@ +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Request { - #[prost(int64, tag="1")] + #[prost(int64, tag = "1")] pub start_block_num: i64, - #[prost(string, tag="2")] + #[prost(string, tag = "2")] pub start_cursor: ::prost::alloc::string::String, - #[prost(uint64, tag="3")] + #[prost(uint64, tag = "3")] pub stop_block_num: u64, - #[prost(enumeration="ForkStep", repeated, tag="4")] + #[prost(enumeration = "ForkStep", repeated, tag = "4")] pub fork_steps: ::prost::alloc::vec::Vec, - #[prost(string, tag="5")] + #[prost(string, tag = "5")] pub irreversibility_condition: ::prost::alloc::string::String, - #[prost(message, optional, tag="6")] + /// By default, the engine runs in developer mode, with richer and deeper output, + /// * support for multiple `output_modules`, of `store` and `map` kinds + /// * support for `initial_store_snapshot_for_modules` + /// * log outputs for output modules + /// + /// With `production_mode`, however, you trade off functionality for high speed, where it: + /// * restricts the possible requested `output_modules` to a single mapper module, + /// * turns off support for `initial_store_snapshot_for_modules`, + /// * still streams output linearly, with a cursor, but at higher speeds + /// * and purges log outputs from responses. + #[prost(bool, tag = "9")] + pub production_mode: bool, + #[prost(message, optional, tag = "6")] pub modules: ::core::option::Option, - #[prost(string, repeated, tag="7")] + #[prost(string, repeated, tag = "7")] pub output_modules: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, - #[prost(string, repeated, tag="8")] - pub initial_store_snapshot_for_modules: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + #[prost(string, repeated, tag = "8")] + pub initial_store_snapshot_for_modules: ::prost::alloc::vec::Vec< + ::prost::alloc::string::String, + >, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Response { - #[prost(oneof="response::Message", tags="1, 2, 3, 4")] + #[prost(oneof = "response::Message", tags = "5, 1, 2, 3, 4")] pub message: ::core::option::Option, } /// Nested message and enum types in `Response`. pub mod response { + #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum Message { + /// Always sent first + #[prost(message, tag = "5")] + Session(super::SessionInit), /// Progress of data preparation, before sending in the stream of `data` events. 
- #[prost(message, tag="1")] + #[prost(message, tag = "1")] Progress(super::ModulesProgress), - #[prost(message, tag="2")] + #[prost(message, tag = "2")] SnapshotData(super::InitialSnapshotData), - #[prost(message, tag="3")] + #[prost(message, tag = "3")] SnapshotComplete(super::InitialSnapshotComplete), - #[prost(message, tag="4")] + #[prost(message, tag = "4")] Data(super::BlockScopedData), } } +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SessionInit { + #[prost(string, tag = "1")] + pub trace_id: ::prost::alloc::string::String, +} +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct InitialSnapshotComplete { - #[prost(string, tag="1")] + #[prost(string, tag = "1")] pub cursor: ::prost::alloc::string::String, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct InitialSnapshotData { - #[prost(string, tag="1")] + #[prost(string, tag = "1")] pub module_name: ::prost::alloc::string::String, - #[prost(message, optional, tag="2")] + #[prost(message, optional, tag = "2")] pub deltas: ::core::option::Option, - #[prost(uint64, tag="4")] + #[prost(uint64, tag = "4")] pub sent_keys: u64, - #[prost(uint64, tag="3")] + #[prost(uint64, tag = "3")] pub total_keys: u64, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct BlockScopedData { - #[prost(message, repeated, tag="1")] + #[prost(message, repeated, tag = "1")] pub outputs: ::prost::alloc::vec::Vec, - #[prost(message, optional, tag="3")] + #[prost(message, optional, tag = "3")] pub clock: ::core::option::Option, - #[prost(enumeration="ForkStep", tag="6")] + #[prost(enumeration = "ForkStep", tag = "6")] pub step: i32, - #[prost(string, tag="10")] + #[prost(string, tag = "10")] pub cursor: ::prost::alloc::string::String, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ModuleOutput { - #[prost(string, tag="1")] + #[prost(string, tag = "1")] pub name: ::prost::alloc::string::String, - #[prost(string, repeated, tag="4")] - pub logs: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + #[prost(string, repeated, tag = "4")] + pub debug_logs: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, /// LogsTruncated is a flag that tells you if you received all the logs or if they /// were truncated because you logged too much (fixed limit currently is set to 128 KiB). - #[prost(bool, tag="5")] - pub logs_truncated: bool, - #[prost(oneof="module_output::Data", tags="2, 3")] + #[prost(bool, tag = "5")] + pub debug_logs_truncated: bool, + #[prost(bool, tag = "6")] + pub cached: bool, + #[prost(oneof = "module_output::Data", tags = "2, 3")] pub data: ::core::option::Option, } /// Nested message and enum types in `ModuleOutput`. pub mod module_output { + #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum Data { - #[prost(message, tag="2")] + #[prost(message, tag = "2")] MapOutput(::prost_types::Any), - #[prost(message, tag="3")] - StoreDeltas(super::StoreDeltas), + /// StoreDeltas are produced for store modules in development mode. + /// It is not possible to retrieve store models in production, with parallelization + /// enabled. If you need the deltas directly, write a pass through mapper module + /// that will get them down to you. 
+ #[prost(message, tag = "3")] + DebugStoreDeltas(super::StoreDeltas), } } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ModulesProgress { - #[prost(message, repeated, tag="1")] + #[prost(message, repeated, tag = "1")] pub modules: ::prost::alloc::vec::Vec, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ModuleProgress { - #[prost(string, tag="1")] + #[prost(string, tag = "1")] pub name: ::prost::alloc::string::String, - #[prost(oneof="module_progress::Type", tags="2, 3, 4, 5")] + #[prost(oneof = "module_progress::Type", tags = "2, 3, 4, 5")] pub r#type: ::core::option::Option, } /// Nested message and enum types in `ModuleProgress`. pub mod module_progress { + #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ProcessedRange { - #[prost(message, repeated, tag="1")] + #[prost(message, repeated, tag = "1")] pub processed_ranges: ::prost::alloc::vec::Vec, } + #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct InitialState { - #[prost(uint64, tag="2")] + #[prost(uint64, tag = "2")] pub available_up_to_block: u64, } + #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ProcessedBytes { - #[prost(uint64, tag="1")] + #[prost(uint64, tag = "1")] pub total_bytes_read: u64, - #[prost(uint64, tag="2")] + #[prost(uint64, tag = "2")] pub total_bytes_written: u64, } + #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Failed { - #[prost(string, tag="1")] + #[prost(string, tag = "1")] pub reason: ::prost::alloc::string::String, - #[prost(string, repeated, tag="2")] + #[prost(string, repeated, tag = "2")] pub logs: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, /// FailureLogsTruncated is a flag that tells you if you received all the logs or if they /// were truncated because you logged too much (fixed limit currently is set to 128 KiB). 
- #[prost(bool, tag="3")] + #[prost(bool, tag = "3")] pub logs_truncated: bool, } + #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum Type { - #[prost(message, tag="2")] + #[prost(message, tag = "2")] ProcessedRanges(ProcessedRange), - #[prost(message, tag="3")] + #[prost(message, tag = "3")] InitialState(InitialState), - #[prost(message, tag="4")] + #[prost(message, tag = "4")] ProcessedBytes(ProcessedBytes), - #[prost(message, tag="5")] + #[prost(message, tag = "5")] Failed(Failed), } } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct BlockRange { - #[prost(uint64, tag="1")] + #[prost(uint64, tag = "2")] pub start_block: u64, - #[prost(uint64, tag="2")] + #[prost(uint64, tag = "3")] pub end_block: u64, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct StoreDeltas { - #[prost(message, repeated, tag="1")] + #[prost(message, repeated, tag = "1")] pub deltas: ::prost::alloc::vec::Vec, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct StoreDelta { - #[prost(enumeration="store_delta::Operation", tag="1")] + #[prost(enumeration = "store_delta::Operation", tag = "1")] pub operation: i32, - #[prost(uint64, tag="2")] + #[prost(uint64, tag = "2")] pub ordinal: u64, - #[prost(string, tag="3")] + #[prost(string, tag = "3")] pub key: ::prost::alloc::string::String, - #[prost(bytes="vec", tag="4")] + #[prost(bytes = "vec", tag = "4")] pub old_value: ::prost::alloc::vec::Vec, - #[prost(bytes="vec", tag="5")] + #[prost(bytes = "vec", tag = "5")] pub new_value: ::prost::alloc::vec::Vec, } /// Nested message and enum types in `StoreDelta`. pub mod store_delta { - #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] + #[derive( + Clone, + Copy, + Debug, + PartialEq, + Eq, + Hash, + PartialOrd, + Ord, + ::prost::Enumeration + )] #[repr(i32)] pub enum Operation { Unset = 0, @@ -176,57 +233,87 @@ pub mod store_delta { Update = 2, Delete = 3, } + impl Operation { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Operation::Unset => "UNSET", + Operation::Create => "CREATE", + Operation::Update => "UPDATE", + Operation::Delete => "DELETE", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. 
+ pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "UNSET" => Some(Self::Unset), + "CREATE" => Some(Self::Create), + "UPDATE" => Some(Self::Update), + "DELETE" => Some(Self::Delete), + _ => None, + } + } + } } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Output { - #[prost(uint64, tag="1")] + #[prost(uint64, tag = "1")] pub block_num: u64, - #[prost(string, tag="2")] + #[prost(string, tag = "2")] pub block_id: ::prost::alloc::string::String, - #[prost(message, optional, tag="4")] + #[prost(message, optional, tag = "4")] pub timestamp: ::core::option::Option<::prost_types::Timestamp>, - #[prost(message, optional, tag="10")] + #[prost(message, optional, tag = "10")] pub value: ::core::option::Option<::prost_types::Any>, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Modules { - #[prost(message, repeated, tag="1")] + #[prost(message, repeated, tag = "1")] pub modules: ::prost::alloc::vec::Vec, - #[prost(message, repeated, tag="2")] + #[prost(message, repeated, tag = "2")] pub binaries: ::prost::alloc::vec::Vec, } /// Binary represents some code compiled to its binary form. +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Binary { - #[prost(string, tag="1")] + #[prost(string, tag = "1")] pub r#type: ::prost::alloc::string::String, - #[prost(bytes="vec", tag="2")] + #[prost(bytes = "vec", tag = "2")] pub content: ::prost::alloc::vec::Vec, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Module { - #[prost(string, tag="1")] + #[prost(string, tag = "1")] pub name: ::prost::alloc::string::String, - #[prost(uint32, tag="4")] + #[prost(uint32, tag = "4")] pub binary_index: u32, - #[prost(string, tag="5")] + #[prost(string, tag = "5")] pub binary_entrypoint: ::prost::alloc::string::String, - #[prost(message, repeated, tag="6")] + #[prost(message, repeated, tag = "6")] pub inputs: ::prost::alloc::vec::Vec, - #[prost(message, optional, tag="7")] + #[prost(message, optional, tag = "7")] pub output: ::core::option::Option, - #[prost(uint64, tag="8")] + #[prost(uint64, tag = "8")] pub initial_block: u64, - #[prost(oneof="module::Kind", tags="2, 3")] + #[prost(oneof = "module::Kind", tags = "2, 3")] pub kind: ::core::option::Option, } /// Nested message and enum types in `Module`. pub mod module { + #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct KindMap { - #[prost(string, tag="1")] + #[prost(string, tag = "1")] pub output_type: ::prost::alloc::string::String, } + #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct KindStore { /// The `update_policy` determines the functions available to mutate the store @@ -237,14 +324,24 @@ pub mod module { /// cumulates keys from block 1M to 2M. When we want to use this /// store as a dependency for a downstream module, we will merge the /// two stores according to this policy. - #[prost(enumeration="kind_store::UpdatePolicy", tag="1")] + #[prost(enumeration = "kind_store::UpdatePolicy", tag = "1")] pub update_policy: i32, - #[prost(string, tag="2")] + #[prost(string, tag = "2")] pub value_type: ::prost::alloc::string::String, } /// Nested message and enum types in `KindStore`. 
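// The `as_str_name`/`from_str_name` helpers generated for each enum in this
// file round-trip between the Rust variants and the names used in the .proto
// definition. A minimal sketch using `store_delta::Operation` from above
// (illustrative, not part of the generated file):
//
//     use store_delta::Operation;
//
//     assert_eq!(Operation::Create.as_str_name(), "CREATE");
//     assert_eq!(Operation::from_str_name("DELETE"), Some(Operation::Delete));
//     assert_eq!(Operation::from_str_name("NO_SUCH_NAME"), None);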
pub mod kind_store { - #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] + #[derive( + Clone, + Copy, + Debug, + PartialEq, + Eq, + Hash, + PartialOrd, + Ord, + ::prost::Enumeration + )] #[repr(i32)] pub enum UpdatePolicy { Unset = 0, @@ -258,110 +355,185 @@ pub mod module { Min = 4, /// Provides a store where you can `max_*()` keys, where two stores merge by leaving the maximum value. Max = 5, + /// Provides a store where you can `append()` keys, where two stores merge by concatenating the bytes in order. + Append = 6, + } + impl UpdatePolicy { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + UpdatePolicy::Unset => "UPDATE_POLICY_UNSET", + UpdatePolicy::Set => "UPDATE_POLICY_SET", + UpdatePolicy::SetIfNotExists => "UPDATE_POLICY_SET_IF_NOT_EXISTS", + UpdatePolicy::Add => "UPDATE_POLICY_ADD", + UpdatePolicy::Min => "UPDATE_POLICY_MIN", + UpdatePolicy::Max => "UPDATE_POLICY_MAX", + UpdatePolicy::Append => "UPDATE_POLICY_APPEND", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "UPDATE_POLICY_UNSET" => Some(Self::Unset), + "UPDATE_POLICY_SET" => Some(Self::Set), + "UPDATE_POLICY_SET_IF_NOT_EXISTS" => Some(Self::SetIfNotExists), + "UPDATE_POLICY_ADD" => Some(Self::Add), + "UPDATE_POLICY_MIN" => Some(Self::Min), + "UPDATE_POLICY_MAX" => Some(Self::Max), + "UPDATE_POLICY_APPEND" => Some(Self::Append), + _ => None, + } + } } } + #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Input { - #[prost(oneof="input::Input", tags="1, 2, 3")] + #[prost(oneof = "input::Input", tags = "1, 2, 3")] pub input: ::core::option::Option, } /// Nested message and enum types in `Input`. pub mod input { + #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Source { /// ex: "sf.ethereum.type.v1.Block" - #[prost(string, tag="1")] + #[prost(string, tag = "1")] pub r#type: ::prost::alloc::string::String, } + #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Map { /// ex: "block_to_pairs" - #[prost(string, tag="1")] + #[prost(string, tag = "1")] pub module_name: ::prost::alloc::string::String, } + #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Store { - #[prost(string, tag="1")] + #[prost(string, tag = "1")] pub module_name: ::prost::alloc::string::String, - #[prost(enumeration="store::Mode", tag="2")] + #[prost(enumeration = "store::Mode", tag = "2")] pub mode: i32, } /// Nested message and enum types in `Store`. pub mod store { - #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] + #[derive( + Clone, + Copy, + Debug, + PartialEq, + Eq, + Hash, + PartialOrd, + Ord, + ::prost::Enumeration + )] #[repr(i32)] pub enum Mode { Unset = 0, Get = 1, Deltas = 2, } + impl Mode { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. 
+ pub fn as_str_name(&self) -> &'static str { + match self { + Mode::Unset => "UNSET", + Mode::Get => "GET", + Mode::Deltas => "DELTAS", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "UNSET" => Some(Self::Unset), + "GET" => Some(Self::Get), + "DELTAS" => Some(Self::Deltas), + _ => None, + } + } + } } + #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum Input { - #[prost(message, tag="1")] + #[prost(message, tag = "1")] Source(Source), - #[prost(message, tag="2")] + #[prost(message, tag = "2")] Map(Map), - #[prost(message, tag="3")] + #[prost(message, tag = "3")] Store(Store), } } + #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Output { - #[prost(string, tag="1")] + #[prost(string, tag = "1")] pub r#type: ::prost::alloc::string::String, } + #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum Kind { - #[prost(message, tag="2")] + #[prost(message, tag = "2")] KindMap(KindMap), - #[prost(message, tag="3")] + #[prost(message, tag = "3")] KindStore(KindStore), } } -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct Clock { - #[prost(string, tag="1")] - pub id: ::prost::alloc::string::String, - #[prost(uint64, tag="2")] - pub number: u64, - #[prost(message, optional, tag="3")] - pub timestamp: ::core::option::Option<::prost_types::Timestamp>, -} +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Package { /// Needs to be one so this file can be used _directly_ as a /// buf `Image` and/or a ProtoSet for grpcurl and other tools. - #[prost(message, repeated, tag="1")] + #[prost(message, repeated, tag = "1")] pub proto_files: ::prost::alloc::vec::Vec<::prost_types::FileDescriptorProto>, - #[prost(uint64, tag="5")] + #[prost(uint64, tag = "5")] pub version: u64, - #[prost(message, optional, tag="6")] + #[prost(message, optional, tag = "6")] pub modules: ::core::option::Option, - #[prost(message, repeated, tag="7")] + #[prost(message, repeated, tag = "7")] pub module_meta: ::prost::alloc::vec::Vec, - #[prost(message, repeated, tag="8")] + #[prost(message, repeated, tag = "8")] pub package_meta: ::prost::alloc::vec::Vec, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct PackageMetadata { - #[prost(string, tag="1")] + #[prost(string, tag = "1")] pub version: ::prost::alloc::string::String, - #[prost(string, tag="2")] + #[prost(string, tag = "2")] pub url: ::prost::alloc::string::String, - #[prost(string, tag="3")] + #[prost(string, tag = "3")] pub name: ::prost::alloc::string::String, - #[prost(string, tag="4")] + #[prost(string, tag = "4")] pub doc: ::prost::alloc::string::String, } +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ModuleMetadata { /// Corresponds to the index in `Package.metadata.package_meta` - #[prost(uint64, tag="1")] + #[prost(uint64, tag = "1")] pub package_index: u64, - #[prost(string, tag="2")] + #[prost(string, tag = "2")] pub doc: ::prost::alloc::string::String, } +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Clock { + #[prost(string, tag = "1")] + pub id: ::prost::alloc::string::String, + #[prost(uint64, tag = "2")] + pub number: u64, + #[prost(message, optional, tag = "3")] + pub
timestamp: ::core::option::Option<::prost_types::Timestamp>, +} #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum ForkStep { @@ -373,10 +545,35 @@ pub enum ForkStep { /// Block is now irreversible and can be committed to (finality is chain specific, see chain documentation for more details) StepIrreversible = 4, } +impl ForkStep { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + ForkStep::StepUnknown => "STEP_UNKNOWN", + ForkStep::StepNew => "STEP_NEW", + ForkStep::StepUndo => "STEP_UNDO", + ForkStep::StepIrreversible => "STEP_IRREVERSIBLE", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "STEP_UNKNOWN" => Some(Self::StepUnknown), + "STEP_NEW" => Some(Self::StepNew), + "STEP_UNDO" => Some(Self::StepUndo), + "STEP_IRREVERSIBLE" => Some(Self::StepIrreversible), + _ => None, + } + } +} /// Generated client implementations. pub mod stream_client { #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] use tonic::codegen::*; + use tonic::codegen::http::Uri; #[derive(Debug, Clone)] pub struct StreamClient { inner: tonic::client::Grpc, @@ -403,6 +600,10 @@ pub mod stream_client { let inner = tonic::client::Grpc::new(inner); Self { inner } } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } pub fn with_interceptor( inner: T, interceptor: F, @@ -422,19 +623,19 @@ pub mod stream_client { { StreamClient::new(InterceptedService::new(inner, interceptor)) } - /// Compress requests with `gzip`. + /// Compress requests with the given encoding. /// /// This requires the server to support it otherwise it might respond with an /// error. #[must_use] - pub fn send_gzip(mut self) -> Self { - self.inner = self.inner.send_gzip(); + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); self } - /// Enable decompressing responses with `gzip`. + /// Enable decompressing responses. #[must_use] - pub fn accept_gzip(mut self) -> Self { - self.inner = self.inner.accept_gzip(); + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); self } pub async fn blocks( @@ -465,10 +666,10 @@ pub mod stream_client { pub mod stream_server { #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] use tonic::codegen::*; - ///Generated trait containing gRPC methods that should be implemented for use with StreamServer. + /// Generated trait containing gRPC methods that should be implemented for use with StreamServer. #[async_trait] pub trait Stream: Send + Sync + 'static { - ///Server streaming response type for the Blocks method. + /// Server streaming response type for the Blocks method. type BlocksStream: futures_core::Stream< Item = Result, > @@ -507,16 +708,16 @@ pub mod stream_server { { InterceptedService::new(Self::new(inner), interceptor) } - /// Enable decompressing requests with `gzip`. + /// Enable decompressing requests with the given encoding. 
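// Callers that used the removed gzip-specific helpers now name the encoding
// explicitly. A client-side sketch, assuming `channel` is an already-connected
// tonic channel (illustrative, not part of the generated file):
//
//     use tonic::codec::CompressionEncoding;
//
//     // previously: StreamClient::new(channel).send_gzip().accept_gzip()
//     let client = super::stream_client::StreamClient::new(channel)
//         .send_compressed(CompressionEncoding::Gzip)
//         .accept_compressed(CompressionEncoding::Gzip);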
#[must_use] - pub fn accept_gzip(mut self) -> Self { - self.accept_compression_encodings.enable_gzip(); + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.accept_compression_encodings.enable(encoding); self } - /// Compress responses with `gzip`, if the client supports it. + /// Compress responses with the given encoding, if the client supports it. #[must_use] - pub fn send_gzip(mut self) -> Self { - self.send_compression_encodings.enable_gzip(); + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.send_compression_encodings.enable(encoding); self } } @@ -610,7 +811,7 @@ pub mod stream_server { write!(f, "{:?}", self.0) } } - impl tonic::transport::NamedService for StreamServer { + impl tonic::server::NamedService for StreamServer { const NAME: &'static str = "sf.substreams.v1.Stream"; } } diff --git a/graph/src/util/error.rs b/graph/src/util/error.rs index 1b3cb884467..bd49644fe42 100644 --- a/graph/src/util/error.rs +++ b/graph/src/util/error.rs @@ -3,17 +3,26 @@ macro_rules! ensure { ($cond:expr, $msg:literal $(,)?) => { if !$cond { - return Err(From::from($crate::prelude::anyhow::anyhow!($msg))); + return Err(From::from($crate::prelude::anyhow::anyhow!($msg))) } }; ($cond:expr, $err:expr $(,)?) => { if !$cond { - return Err(From::from($crate::prelude::anyhow::anyhow!($err))); + return Err(From::from($crate::prelude::anyhow::anyhow!($err))) } }; ($cond:expr, $fmt:expr, $($arg:tt)*) => { if !$cond { - return Err(From::from($crate::prelude::anyhow::anyhow!($fmt, $($arg)*))); + return Err(From::from($crate::prelude::anyhow::anyhow!($fmt, $($arg)*))) } }; } + +// `bail!` from `anyhow`, but calling `from`. +// For context see https://github.com/dtolnay/anyhow/issues/112#issuecomment-704549251. +#[macro_export] +macro_rules! bail { + ($($err:tt)*) => { + return Err(anyhow::anyhow!($($err)*).into()) + }; +} diff --git a/graph/src/util/monitored.rs b/graph/src/util/monitored.rs index c0f7147b416..3008772c1e2 100644 --- a/graph/src/util/monitored.rs +++ b/graph/src/util/monitored.rs @@ -29,7 +29,9 @@ impl MonitoredVecDeque { pub fn pop_front(&mut self) -> Option { let item = self.vec_deque.pop_front(); self.depth.set(self.vec_deque.len() as f64); - self.popped.inc(); + if item.is_some() { + self.popped.inc(); + } item } diff --git a/graph/src/util/timed_cache.rs b/graph/src/util/timed_cache.rs index 1d2e6c7f373..20ac7ba49fd 100644 --- a/graph/src/util/timed_cache.rs +++ b/graph/src/util/timed_cache.rs @@ -90,6 +90,21 @@ impl TimedCache { .find(move |entry| pred(entry.value.as_ref())) .map(|entry| entry.value.clone()) } + + /// Remove an entry from the cache. 
If there was an entry for `key`, + /// return the value associated with it and whether the entry is still + /// live + pub fn remove(&self, key: &Q) -> Option<(Arc, bool)> + where + K: Borrow + Eq + Hash, + Q: Hash + Eq, + { + self.entries + .write() + .unwrap() + .remove(key) + .map(|CacheEntry { value, expires }| (value, expires >= Instant::now())) + } } #[test] diff --git a/graph/tests/entity_cache.rs b/graph/tests/entity_cache.rs index 10935fd7ddc..ffe2388d058 100644 --- a/graph/tests/entity_cache.rs +++ b/graph/tests/entity_cache.rs @@ -2,10 +2,11 @@ use async_trait::async_trait; use graph::blockchain::block_stream::FirehoseCursor; use graph::blockchain::BlockPtr; use graph::data::subgraph::schema::{SubgraphError, SubgraphHealth}; +use graph::data_source::CausalityRegion; use graph::prelude::{Schema, StopwatchMetrics, StoreError, UnfailOutcome}; use lazy_static::lazy_static; use slog::Logger; -use std::collections::BTreeMap; +use std::collections::{BTreeMap, BTreeSet}; use std::sync::Arc; use graph::components::store::{ @@ -13,7 +14,7 @@ use graph::components::store::{ }; use graph::{ components::store::{DeploymentId, DeploymentLocator}, - prelude::{anyhow, DeploymentHash, Entity, EntityCache, EntityModification, Value}, + prelude::{DeploymentHash, Entity, EntityCache, EntityModification, Value}, }; lazy_static! { @@ -37,33 +38,24 @@ lazy_static! { } struct MockStore { - get_many_res: BTreeMap>, + get_many_res: BTreeMap, } impl MockStore { - fn new(get_many_res: BTreeMap>) -> Self { + fn new(get_many_res: BTreeMap) -> Self { Self { get_many_res } } } impl ReadStore for MockStore { fn get(&self, key: &EntityKey) -> Result, StoreError> { - match self.get_many_res.get(&key.entity_type) { - Some(entities) => Ok(entities - .iter() - .find(|entity| entity.id().ok().as_deref() == Some(key.entity_id.as_str())) - .cloned()), - None => Err(StoreError::Unknown(anyhow!( - "nothing for type {}", - key.entity_type - ))), - } + Ok(self.get_many_res.get(&key).cloned()) } fn get_many( &self, - _ids_for_type: BTreeMap<&EntityType, Vec<&str>>, - ) -> Result>, StoreError> { + _keys: BTreeSet, + ) -> Result, StoreError> { Ok(self.get_many_res.clone()) } @@ -158,6 +150,10 @@ impl WritableStore for MockStore { async fn flush(&self) -> Result<(), StoreError> { unimplemented!() } + + async fn causality_region_curr_val(&self) -> Result, StoreError> { + unimplemented!() + } } fn make_band(id: &'static str, data: Vec<(&str, Value)>) -> (EntityKey, Entity) { @@ -165,6 +161,7 @@ fn make_band(id: &'static str, data: Vec<(&str, Value)>) -> (EntityKey, Entity) EntityKey { entity_type: EntityType::new("Band".to_string()), entity_id: id.into(), + causality_region: CausalityRegion::ONCHAIN, }, Entity::from(data), ) @@ -178,7 +175,7 @@ fn sort_by_entity_key(mut mods: Vec) -> Vec, -) -> BTreeMap> { +fn entity_version_map(entity_type: &str, entities: Vec) -> BTreeMap { let mut map = BTreeMap::new(); - map.insert(EntityType::from(entity_type), entities); + for entity in entities { + let key = EntityKey { + entity_type: EntityType::new(entity_type.to_string()), + entity_id: entity.id().unwrap().into(), + causality_region: CausalityRegion::ONCHAIN, + }; + map.insert(key, entity); + } map } @@ -252,7 +253,7 @@ fn overwrite_modifications() { }; let store = Arc::new(store); - let mut cache = EntityCache::new(store.clone()); + let mut cache = EntityCache::new(store); let (mogwai_key, mogwai_data) = make_band( "mogwai", @@ -313,7 +314,7 @@ fn consecutive_modifications() { }; let store = Arc::new(store); - let mut cache = 
EntityCache::new(store.clone()); + let mut cache = EntityCache::new(store); // First, add "founded" and change the "label". let (update_key, update_data) = make_band( @@ -324,14 +325,14 @@ fn consecutive_modifications() { ("label", "Rock Action Records".into()), ], ); - cache.set(update_key.clone(), update_data.clone()).unwrap(); + cache.set(update_key, update_data).unwrap(); // Then, just reset the "label". let (update_key, update_data) = make_band( "mogwai", vec![("id", "mogwai".into()), ("label", Value::Null)], ); - cache.set(update_key.clone(), update_data.clone()).unwrap(); + cache.set(update_key.clone(), update_data).unwrap(); // We expect a single overwrite modification for the above that leaves "id" // and "name" untouched, sets "founded" and removes the "label" field. diff --git a/graphql/Cargo.toml b/graphql/Cargo.toml index 44c8313bcee..17bd166e8d1 100644 --- a/graphql/Cargo.toml +++ b/graphql/Cargo.toml @@ -1,24 +1,24 @@ [package] name = "graph-graphql" -version = "0.27.0" -edition = "2021" +version.workspace = true +edition.workspace = true [dependencies] crossbeam = "0.8" graph = { path = "../graph" } graphql-parser = "0.4.0" -graphql-tools = "0.2.0" +graphql-tools = "0.2.1" indexmap = "1.9" Inflector = "0.11.3" lazy_static = "1.2.0" stable-hash_legacy = { version = "0.3.3", package = "stable-hash" } -stable-hash = { version = "0.4.2"} +stable-hash = { version = "0.4.2" } defer = "0.1" parking_lot = "0.12" anyhow = "1.0" async-recursion = "1.0.0" [dev-dependencies] -pretty_assertions = "1.2.1" +pretty_assertions = "1.3.0" test-store = { path = "../store/test-store" } graph-chain-ethereum = { path = "../chain/ethereum" } diff --git a/graphql/src/execution/execution.rs b/graphql/src/execution/execution.rs index 88e993a2b60..d1a23d0d93a 100644 --- a/graphql/src/execution/execution.rs +++ b/graphql/src/execution/execution.rs @@ -204,6 +204,9 @@ where /// Records whether this was a cache hit, used for logging. pub(crate) cache_status: AtomicCell, + + /// Whether to include an execution trace in the result + pub trace: bool, } pub(crate) fn get_field<'a>( @@ -236,6 +239,7 @@ where // `cache_status` is a dead value for the introspection context. cache_status: AtomicCell::new(CacheStatus::Miss), + trace: ENV_VARS.log_sql_timing(), } } } @@ -505,22 +509,19 @@ async fn execute_selection_set_to_map<'a>( let field_type = sast::get_field(object_type, &field.name).unwrap(); // Check if we have the value already. - let field_value = prefetched_object - .as_mut() - .map(|o| { - // Prefetched objects are associated to `prefetch:response_key`. - if let Some(val) = o.remove(&format!("prefetch:{}", response_key)) { - return Some(val); - } + let field_value = prefetched_object.as_mut().and_then(|o| { + // Prefetched objects are associated to `prefetch:response_key`. + if let Some(val) = o.remove(&format!("prefetch:{}", response_key)) { + return Some(val); + } - // Scalars and scalar lists are associated to the field name. - // If the field has more than one response key, we have to clone. - match multiple_response_keys.contains(field.name.as_str()) { - false => o.remove(&field.name), - true => o.get(&field.name).cloned(), - } - }) - .flatten(); + // Scalars and scalar lists are associated to the field name. + // If the field has more than one response key, we have to clone. 
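// `and_then` is the clippy-preferred equivalent of the `map(..).flatten()`
// chain it replaces here: for any `opt: Option<T>` and `f: FnOnce(T) -> Option<U>`,
// `opt.map(f).flatten()` and `opt.and_then(f)` agree. A quick sketch, not part
// of this file:
//
//     let o = Some("a:1");
//     assert_eq!(
//         o.map(|s| s.split(':').nth(1)).flatten(),
//         o.and_then(|s| s.split(':').nth(1)), // both are Some("1")
//     );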
+ match multiple_response_keys.contains(field.name.as_str()) { + false => o.remove(&field.name), + true => o.get(&field.name).cloned(), + } + }); if field.name.as_str() == "__typename" && field_value.is_none() { results.push((response_key, r::Value::String(object_type.name.clone()))); diff --git a/graphql/src/execution/query.rs b/graphql/src/execution/query.rs index f0d4edd5b82..5f07e68d42c 100644 --- a/graphql/src/execution/query.rs +++ b/graphql/src/execution/query.rs @@ -2,10 +2,8 @@ use graph::data::graphql::DocumentExt as _; use graph::data::value::Object; use graphql_parser::Pos; use graphql_tools::validation::rules::*; -use graphql_tools::validation::utils::ValidationError; use graphql_tools::validation::validate::{validate, ValidationPlan}; use lazy_static::lazy_static; -use parking_lot::Mutex; use std::collections::{BTreeMap, HashMap, HashSet}; use std::hash::{Hash, Hasher}; use std::iter::FromIterator; @@ -18,7 +16,8 @@ use graph::data::query::QueryExecutionError; use graph::data::query::{Query as GraphDataQuery, QueryVariables}; use graph::data::schema::ApiSchema; use graph::prelude::{ - info, o, q, r, s, warn, BlockNumber, CheapClone, GraphQLMetrics, Logger, TryFromValue, ENV_VARS, + info, o, q, r, s, warn, BlockNumber, CheapClone, DeploymentHash, GraphQLMetrics, Logger, + TryFromValue, ENV_VARS, }; use crate::execution::ast as a; @@ -27,11 +26,6 @@ use crate::schema::ast::{self as sast}; use crate::values::coercion; use crate::{execution::get_field, schema::api::ErrorPolicy}; -lazy_static! { - static ref GRAPHQL_VALIDATION_CACHE: Mutex>> = - Mutex::new(HashMap::>::new()); -} - lazy_static! { static ref GRAPHQL_VALIDATION_PLAN: ValidationPlan = ValidationPlan::from(if !ENV_VARS.graphql.enable_validations { @@ -148,28 +142,14 @@ fn validate_query( logger: &Logger, query: &GraphDataQuery, document: &s::Document, + metrics: &Arc, + id: &DeploymentHash, ) -> Result<(), Vec> { - let errors = { - let cached = GRAPHQL_VALIDATION_CACHE - .lock() - .get(&query.shape_hash) - .cloned(); - match cached { - Some(cached) => cached, - None => { - let validation_errors = - validate(&document, &query.document, &GRAPHQL_VALIDATION_PLAN); - GRAPHQL_VALIDATION_CACHE - .lock() - .insert(query.shape_hash, validation_errors.clone()); - validation_errors - } - } - }; + let validation_errors = validate(document, &query.document, &GRAPHQL_VALIDATION_PLAN); - if !errors.is_empty() { + if !validation_errors.is_empty() { if !ENV_VARS.graphql.silent_graphql_validations { - return Err(errors + return Err(validation_errors .into_iter() .map(|e| { QueryExecutionError::ValidationError( @@ -184,8 +164,15 @@ fn validate_query( "GraphQL Validation failure"; "query" => &query.query_text, "variables" => &query.variables_text, - "errors" => format!("[{:?}]", errors.iter().map(|e| e.message.clone()).collect::>().join(", ")) + "errors" => format!("[{:?}]", validation_errors.iter().map(|e| e.message.clone()).collect::>().join(", ")) ); + + let error_codes = validation_errors + .iter() + .map(|e| e.error_code) + .collect::>(); + + metrics.observe_query_validation_error(error_codes, id); } } @@ -206,8 +193,20 @@ impl Query { max_depth: u8, metrics: Arc, ) -> Result, Vec> { + let query_hash = { + let mut hasher = DefaultHasher::new(); + query.query_text.hash(&mut hasher); + query.variables_text.hash(&mut hasher); + hasher.finish() + }; + let query_id = format!("{:x}-{:x}", query.shape_hash, query_hash); + let logger = logger.new(o!( + "subgraph_id" => schema.id().clone(), + "query_id" => query_id.clone() + )); + let 
validation_phase_start = Instant::now(); - validate_query(logger, &query, &schema.document())?; + validate_query(&logger, &query, schema.document(), &metrics, schema.id())?; metrics.observe_query_validation(validation_phase_start.elapsed(), schema.id()); let mut operation = None; @@ -242,18 +241,6 @@ impl Query { } }; - let query_hash = { - let mut hasher = DefaultHasher::new(); - query.query_text.hash(&mut hasher); - query.variables_text.hash(&mut hasher); - hasher.finish() - }; - let query_id = format!("{:x}-{:x}", query.shape_hash, query_hash); - let logger = logger.new(o!( - "subgraph_id" => schema.id().clone(), - "query_id" => query_id.clone() - )); - let start = Instant::now(); let root_type = match kind { Kind::Query => schema.query_type.as_ref(), @@ -646,8 +633,7 @@ impl<'s> RawQuery<'s> { fn validate_fields(&self) -> Result<(), Vec> { let root_type = self.schema.query_type.as_ref(); - let errors = - self.validate_fields_inner(&"Query".to_owned(), root_type.into(), &self.selection_set); + let errors = self.validate_fields_inner("Query", root_type.into(), &self.selection_set); if errors.is_empty() { Ok(()) } else { @@ -948,17 +934,15 @@ impl Transform { return Ok(None); } a::SelectionSet::new(vec![]) + } else if is_leaf_type { + // see: graphql-bug-compat + // Field does not allow selections, ignore selections + a::SelectionSet::new(vec![]) } else { - if is_leaf_type { - // see: graphql-bug-compat - // Field does not allow selections, ignore selections - a::SelectionSet::new(vec![]) - } else { - let ty = field_type.field_type.get_base_type(); - let type_set = a::ObjectTypeSet::from_name(&self.schema, ty)?; - let ty = self.schema.object_or_interface(ty).unwrap(); - self.expand_selection_set(selection_set, &type_set, ty)? - } + let ty = field_type.field_type.get_base_type(); + let type_set = a::ObjectTypeSet::from_name(&self.schema, ty)?; + let ty = self.schema.object_or_interface(ty).unwrap(); + self.expand_selection_set(selection_set, &type_set, ty)? 
}; Ok(Some(a::Field { diff --git a/graphql/src/introspection/resolver.rs b/graphql/src/introspection/resolver.rs index ed95e0066b9..f71ee350797 100644 --- a/graphql/src/introspection/resolver.rs +++ b/graphql/src/introspection/resolver.rs @@ -135,7 +135,7 @@ fn interface_type_object( description: interface_type.description.clone(), fields: field_objects(schema, type_objects, &interface_type.fields), - possibleTypes: schema.types_for_interface()[&interface_type.into()] + possibleTypes: schema.types_for_interface()[interface_type.name.as_str()] .iter() .map(|object_type| r::Value::String(object_type.name.to_owned())) .collect::>(), diff --git a/graphql/src/metrics.rs b/graphql/src/metrics.rs index 5163e8cc0ba..69d17ed5c01 100644 --- a/graphql/src/metrics.rs +++ b/graphql/src/metrics.rs @@ -5,7 +5,7 @@ use std::time::Duration; use graph::data::query::QueryResults; use graph::prelude::{DeploymentHash, GraphQLMetrics as GraphQLMetricsTrait, MetricsRegistry}; -use graph::prometheus::{Gauge, Histogram, HistogramVec}; +use graph::prometheus::{CounterVec, Gauge, Histogram, HistogramVec}; pub struct GraphQLMetrics { query_execution_time: Box, @@ -13,6 +13,7 @@ pub struct GraphQLMetrics { query_validation_time: Box, query_result_size: Box, query_result_size_max: Box, + query_validation_error_counter: Box, } impl fmt::Debug for GraphQLMetrics { @@ -64,6 +65,14 @@ impl GraphQLMetricsTrait for GraphQLMetrics { .with_label_values(&[id.as_str()]) .observe(duration.as_secs_f64()); } + + fn observe_query_validation_error(&self, error_codes: Vec<&str>, id: &DeploymentHash) { + for code in error_codes.iter() { + self.query_validation_error_counter + .with_label_values(&[id.as_str(), *code]) + .inc(); + } + } } impl GraphQLMetrics { @@ -111,12 +120,21 @@ impl GraphQLMetrics { ) .unwrap(); + let query_validation_error_counter = registry + .new_counter_vec( + "query_validation_error_counter", + "a counter for the number of validation errors", + vec![String::from("deployment"), String::from("error_code")], + ) + .unwrap(); + Self { query_execution_time, query_parsing_time, query_validation_time, query_result_size, query_result_size_max, + query_validation_error_counter, } } diff --git a/graphql/src/query/mod.rs b/graphql/src/query/mod.rs index b8f9b3fa8c5..707b87936a0 100644 --- a/graphql/src/query/mod.rs +++ b/graphql/src/query/mod.rs @@ -27,6 +27,9 @@ pub struct QueryExecutionOptions { pub max_skip: u32, pub load_manager: Arc, + + /// Whether to include an execution trace in the result + pub trace: bool, } /// Executes a query and returns a result. 
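The new counter is labeled by deployment and error code, so a single failed validation can increment several time series, one per reported code. A minimal sketch of how the hook is driven, assuming the registry-taking constructor suggested by the `impl` block above and using a made-up error code purely for illustration:

    let metrics = GraphQLMetrics::new(registry);
    // Increments query_validation_error_counter once per code, labeled with
    // the deployment id and the error code.
    metrics.observe_query_validation_error(vec!["UniqueOperationNames"], &deployment);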
@@ -49,6 +52,7 @@ where max_first: options.max_first, max_skip: options.max_skip, cache_status: Default::default(), + trace: options.trace, }); if !query.is_query() { diff --git a/graphql/src/runner.rs b/graphql/src/runner.rs index 968c4263d00..89b2c2d182a 100644 --- a/graphql/src/runner.rs +++ b/graphql/src/runner.rs @@ -123,6 +123,7 @@ where .unwrap_or(state); let max_depth = max_depth.unwrap_or(ENV_VARS.graphql.max_depth); + let trace = query.trace; let query = crate::execution::Query::new( &self.logger, schema, @@ -168,6 +169,7 @@ where max_first: max_first.unwrap_or(ENV_VARS.graphql.max_first), max_skip: max_skip.unwrap_or(ENV_VARS.graphql.max_skip), load_manager: self.load_manager.clone(), + trace, }, ) .await; diff --git a/graphql/src/schema/api.rs b/graphql/src/schema/api.rs index b1500b771f8..6794ee8674a 100644 --- a/graphql/src/schema/api.rs +++ b/graphql/src/schema/api.rs @@ -1,6 +1,6 @@ use std::str::FromStr; -use graphql_parser::Pos; +use graphql_parser::{schema::TypeDefinition, Pos}; use inflector::Inflector; use lazy_static::lazy_static; @@ -157,16 +157,7 @@ fn add_order_by_type( description: None, name: type_name, directives: vec![], - values: fields - .iter() - .map(|field| &field.name) - .map(|name| EnumValue { - position: Pos::default(), - description: None, - name: name.to_owned(), - directives: vec![], - }) - .collect(), + values: field_enum_values(schema, fields)?, }); let def = Definition::TypeDefinition(typedef); schema.definitions.push(def); @@ -176,6 +167,80 @@ fn add_order_by_type( Ok(()) } +/// Generates enum values for the given set of fields. +fn field_enum_values( + schema: &Document, + fields: &[Field], +) -> Result, APISchemaError> { + let mut enum_values = vec![]; + for field in fields { + enum_values.push(EnumValue { + position: Pos::default(), + description: None, + name: field.name.to_owned(), + directives: vec![], + }); + enum_values.extend(field_enum_values_from_child_entity(schema, field)?); + } + Ok(enum_values) +} + +fn enum_value_from_child_entity_field( + schema: &Document, + parent_field_name: &str, + field: &Field, +) -> Option { + if ast::is_list_or_non_null_list_field(field) || ast::is_entity_type(schema, &field.field_type) + { + // Sorting on lists or entities is not supported. + None + } else { + Some(EnumValue { + position: Pos::default(), + description: None, + name: format!("{}__{}", parent_field_name, field.name), + directives: vec![], + }) + } +} + +fn field_enum_values_from_child_entity( + schema: &Document, + field: &Field, +) -> Result, APISchemaError> { + fn resolve_supported_type_name(field_type: &Type) -> Option<&String> { + match field_type { + Type::NamedType(name) => Some(name), + Type::ListType(_) => None, + Type::NonNullType(of_type) => resolve_supported_type_name(of_type), + } + } + + let type_name = match ENV_VARS.graphql.disable_child_sorting { + true => None, + false => resolve_supported_type_name(&field.field_type), + }; + + Ok(match type_name { + Some(name) => { + let named_type = schema + .get_named_type(name) + .ok_or_else(|| APISchemaError::TypeNotFound(name.clone()))?; + match named_type { + TypeDefinition::Object(ObjectType { fields, .. }) + | TypeDefinition::Interface(InterfaceType { fields, .. }) => fields + .iter() + .filter_map(|f| { + enum_value_from_child_entity_field(schema, field.name.as_str(), f) + }) + .collect(), + _ => vec![], + } + } + None => vec![], + }) +} + /// Adds a `_filter` input object type for the given fields to the schema.
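// With the `and`/`or` fields added below, a `where` argument such as
// `{ or: [{ name: "John", id: "m1" }, { mainBand: "b2" }] }` (the example used
// in the filter-building code later in this diff) lowers to nested store-level
// filters, roughly as follows (sketch; constructors are from
// graph::prelude::EntityFilter as used elsewhere in this diff):
//
//     EntityFilter::Or(vec![
//         EntityFilter::And(vec![
//             EntityFilter::Equal("name".into(), "John".into()),
//             EntityFilter::Equal("id".into(), "m1".into()),
//         ]),
//         EntityFilter::And(vec![EntityFilter::Equal("mainBand".into(), "b2".into())]),
//     ])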
fn add_filter_type( schema: &mut Document, @@ -188,6 +253,30 @@ fn add_filter_type( let mut generated_filter_fields = field_input_values(schema, fields)?; generated_filter_fields.push(block_changed_filter_argument()); + if !ENV_VARS.graphql.disable_bool_filters { + generated_filter_fields.push(InputValue { + position: Pos::default(), + description: None, + name: "and".to_string(), + value_type: Type::ListType(Box::new(Type::NamedType( + filter_type_name.to_owned(), + ))), + default_value: None, + directives: vec![], + }); + + generated_filter_fields.push(InputValue { + position: Pos::default(), + description: None, + name: "or".to_string(), + value_type: Type::ListType(Box::new(Type::NamedType( + filter_type_name.to_owned(), + ))), + default_value: None, + directives: vec![], + }); + } + let typedef = TypeDefinition::InputObject(InputObjectType { position: Pos::default(), description: None, @@ -228,7 +317,7 @@ fn field_filter_input_values( .get_named_type(name) .ok_or_else(|| APISchemaError::TypeNotFound(name.clone()))?; Ok(match named_type { - TypeDefinition::Object(_) => { + TypeDefinition::Object(_) | TypeDefinition::Interface(_) => { let mut input_values = match ast::get_derived_from_directive(field) { // Only add `where` filter fields for object and interface fields // if they are not @derivedFrom @@ -246,30 +335,13 @@ fn field_filter_input_values( extend_with_child_filter_input_value(field, name, &mut input_values); input_values } - TypeDefinition::Interface(_) => { - // Only add `where` filter fields for object and interface fields - // if they are not @derivedFrom - if ast::get_derived_from_directive(field).is_some() { - vec![] - } else { - // We allow filtering with `where: { other: "some-id" }` and - // `where: { others: ["some-id", "other-id"] }`. In both cases, - // we allow ID strings as the values to be passed to these - // filters. - field_scalar_filter_input_values( - schema, - field, - &ScalarType::new(String::from("String")), - ) - } - } TypeDefinition::Scalar(ref t) => field_scalar_filter_input_values(schema, field, t), TypeDefinition::Enum(ref t) => field_enum_filter_input_values(schema, field, t), _ => vec![], }) } Type::ListType(ref t) => { - Ok(field_list_filter_input_values(schema, field, t).unwrap_or(vec![])) + Ok(field_list_filter_input_values(schema, field, t).unwrap_or_default()) } Type::NonNullType(ref t) => field_filter_input_values(schema, field, t), } @@ -284,7 +356,18 @@ fn field_scalar_filter_input_values( match field_type.name.as_ref() { "BigInt" => vec!["", "not", "gt", "lt", "gte", "lte", "in", "not_in"], "Boolean" => vec!["", "not", "in", "not_in"], - "Bytes" => vec!["", "not", "in", "not_in", "contains", "not_contains"], + "Bytes" => vec![ + "", + "not", + "gt", + "lt", + "gte", + "lte", + "in", + "not_in", + "contains", + "not_contains", + ], "BigDecimal" => vec!["", "not", "gt", "lt", "gte", "lte", "in", "not_in"], "ID" => vec!["", "not", "gt", "lt", "gte", "lte", "in", "not_in"], "Int" => vec!["", "not", "gt", "lt", "gte", "lte", "in", "not_in"], @@ -365,27 +448,18 @@ fn field_list_filter_input_values( field_type: &Type, ) -> Option> { // Only add a filter field if the type of the field exists in the schema - ast::get_type_definition_from_type(schema, field_type).and_then(|typedef| { + ast::get_type_definition_from_type(schema, field_type).map(|typedef| { // Decide what type of values can be passed to the filter. In the case // one-to-many or many-to-many object or interface fields that are not // derived, we allow ID strings to be passed on. 
// Adds child filter only to object types. let (input_field_type, parent_type_name) = match typedef { - TypeDefinition::Object(parent) => { + TypeDefinition::Object(ObjectType { name, .. }) + | TypeDefinition::Interface(InterfaceType { name, .. }) => { if ast::get_derived_from_directive(field).is_some() { - (None, Some(parent.name.clone())) + (None, Some(name.clone())) } else { - ( - Some(Type::NamedType("String".into())), - Some(parent.name.clone()), - ) - } - } - TypeDefinition::Interface(_) => { - if ast::get_derived_from_directive(field).is_some() { - (None, None) - } else { - (Some(Type::NamedType("String".into())), None) + (Some(Type::NamedType("String".into())), Some(name.clone())) } } TypeDefinition::Scalar(ref t) => (Some(Type::NamedType(t.name.to_owned())), None), @@ -422,7 +496,7 @@ fn field_list_filter_input_values( extend_with_child_filter_input_value(field, &parent, &mut input_values); } - Some(input_values) + input_values }) } @@ -688,27 +762,27 @@ fn collection_arguments_for_named_type(type_name: &str) -> Vec { // `first` and `skip` should be non-nullable, but the Apollo graphql client // exhibits non-conforming behaviour by erroring if no value is provided for a // non-nullable field, regardless of the presence of a default. - let mut skip = input_value(&"skip".to_string(), "", Type::NamedType("Int".to_string())); + let mut skip = input_value("skip", "", Type::NamedType("Int".to_string())); skip.default_value = Some(Value::Int(0.into())); - let mut first = input_value(&"first".to_string(), "", Type::NamedType("Int".to_string())); + let mut first = input_value("first", "", Type::NamedType("Int".to_string())); first.default_value = Some(Value::Int(100.into())); let args = vec![ skip, first, input_value( - &"orderBy".to_string(), + "orderBy", "", Type::NamedType(format!("{}_orderBy", type_name)), ), input_value( - &"orderDirection".to_string(), + "orderDirection", "", Type::NamedType("OrderDirection".to_string()), ), input_value( - &"where".to_string(), + "where", "", Type::NamedType(format!("{}_filter", type_name)), ), @@ -878,6 +952,180 @@ mod tests { assert_eq!(values, ["id", "name"]); } + #[test] + fn api_schema_contains_field_order_by_enum_for_child_entity() { + let input_schema = parse_schema( + r#" + enum FurType { + NONE + FLUFFY + BRISTLY + } + + type Pet { + id: ID! + name: String! + mostHatedBy: [User!]! + mostLovedBy: [User!]! + } + + interface Recipe { + id: ID! + name: String! + author: User! + lovedBy: [User!]! + ingredients: [String!]! + } + + type FoodRecipe implements Recipe { + id: ID! + name: String! + author: User! + ingredients: [String!]! + } + + type DrinkRecipe implements Recipe { + id: ID! + name: String! + author: User! + ingredients: [String!]! + } + + interface Meal { + id: ID! + name: String! + mostHatedBy: [User!]! + mostLovedBy: [User!]! + } + + type Pizza implements Meal { + id: ID! + name: String! + toppings: [String!]! + mostHatedBy: [User!]! + mostLovedBy: [User!]! + } + + type Burger implements Meal { + id: ID! + name: String! + bun: String! + mostHatedBy: [User!]! + mostLovedBy: [User!]! + } + + type User { + id: ID! + name: String! + favoritePetNames: [String!] + pets: [Pet!]! + favoriteFurType: FurType! + favoritePet: Pet! + leastFavoritePet: Pet @derivedFrom(field: "mostHatedBy") + mostFavoritePets: [Pet!] @derivedFrom(field: "mostLovedBy") + favoriteMeal: Meal! + leastFavoriteMeal: Meal @derivedFrom(field: "mostHatedBy") + mostFavoriteMeals: [Meal!] @derivedFrom(field: "mostLovedBy") + recipes: [Recipe!]!
@derivedFrom(field: "author") + } + "#, + ) + .expect("Failed to parse input schema"); + let schema = api_schema(&input_schema).expect("Failed to derive API schema"); + + let user_order_by = schema + .get_named_type("User_orderBy") + .expect("User_orderBy type is missing in derived API schema"); + + let enum_type = match user_order_by { + TypeDefinition::Enum(t) => Some(t), + _ => None, + } + .expect("User_orderBy type is not an enum"); + + let values: Vec<&str> = enum_type + .values + .iter() + .map(|value| value.name.as_str()) + .collect(); + + assert_eq!( + values, + [ + "id", + "name", + "favoritePetNames", + "pets", + "favoriteFurType", + "favoritePet", + "favoritePet__id", + "favoritePet__name", + "leastFavoritePet", + "leastFavoritePet__id", + "leastFavoritePet__name", + "mostFavoritePets", + "favoriteMeal", + "favoriteMeal__id", + "favoriteMeal__name", + "leastFavoriteMeal", + "leastFavoriteMeal__id", + "leastFavoriteMeal__name", + "mostFavoriteMeals", + "recipes", + ] + ); + + let meal_order_by = schema + .get_named_type("Meal_orderBy") + .expect("Meal_orderBy type is missing in derived API schema"); + + let enum_type = match meal_order_by { + TypeDefinition::Enum(t) => Some(t), + _ => None, + } + .expect("Meal_orderBy type is not an enum"); + + let values: Vec<&str> = enum_type + .values + .iter() + .map(|value| value.name.as_str()) + .collect(); + + assert_eq!(values, ["id", "name", "mostHatedBy", "mostLovedBy",]); + + let recipe_order_by = schema + .get_named_type("Recipe_orderBy") + .expect("Recipe_orderBy type is missing in derived API schema"); + + let enum_type = match recipe_order_by { + TypeDefinition::Enum(t) => Some(t), + _ => None, + } + .expect("Recipe_orderBy type is not an enum"); + + let values: Vec<&str> = enum_type + .values + .iter() + .map(|value| value.name.as_str()) + .collect(); + + assert_eq!( + values, + [ + "id", + "name", + "author", + "author__id", + "author__name", + "author__favoriteFurType", + "author__favoritePet", + "author__leastFavoritePet", + "lovedBy", + "ingredients" + ] + ); + } + #[test] fn api_schema_contains_object_type_filter_enum() { let input_schema = parse_schema( @@ -995,7 +1243,9 @@ mod tests { "favoritePet_", "leastFavoritePet_", "mostFavoritePets_", - "_change_block" + "_change_block", + "and", + "or" ] .iter() .map(ToString::to_string) @@ -1072,7 +1322,9 @@ ... "mostLovedBy_not_contains", "mostLovedBy_not_contains_nocase", "mostLovedBy_", - "_change_block" + "_change_block", + "and", + "or" ] .iter() .map(ToString::to_string) @@ -1174,6 +1426,7 @@ ... "name_ends_with_nocase", "name_not_ends_with", "name_not_ends_with_nocase", + "pets_", "favoritePet", "favoritePet_not", "favoritePet_gt", "favoritePet_lt", "favoritePet_gte", "favoritePet_lte", "favoritePet_in", "favoritePet_not_in", "favoritePet_contains", "favoritePet_contains_nocase", "favoritePet_not_contains", "favoritePet_not_contains_nocase", "favoritePet_starts_with", "favoritePet_starts_with_nocase", "favoritePet_not_starts_with", "favoritePet_not_starts_with_nocase", "favoritePet_ends_with", "favoritePet_ends_with_nocase", "favoritePet_not_ends_with", "favoritePet_not_ends_with_nocase", + "favoritePet_", "_change_block", + "and", + "or" ] .iter() .map(ToString::to_string) diff --git a/graphql/src/schema/ast.rs b/graphql/src/schema/ast.rs index 6460f244eac..6d3abf06670 100644 --- a/graphql/src/schema/ast.rs +++ b/graphql/src/schema/ast.rs @@ -34,6 +34,8 @@ pub(crate) enum FilterOp { NotEndsWithNoCase, Equal, Child, + And, + Or, } /// Split a "name_eq" style name into an attribute ("name") and a filter op (`Equal`).
@@ -66,12 +68,18 @@ pub(crate) fn parse_field_as_filter(key: &str) -> (String, FilterOp) { } k if k.ends_with("_ends_with") => ("_ends_with", FilterOp::EndsWith), k if k.ends_with("_ends_with_nocase") => ("_ends_with_nocase", FilterOp::EndsWithNoCase), - k if k.ends_with("_") => ("_", FilterOp::Child), + k if k.ends_with('_') => ("_", FilterOp::Child), + k if k.eq("and") => ("and", FilterOp::And), + k if k.eq("or") => ("or", FilterOp::Or), _ => ("", FilterOp::Equal), }; - // Strip the operator suffix to get the attribute. - (key.trim_end_matches(suffix).to_owned(), op) + return match op { + FilterOp::And => (key.to_owned(), op), + FilterOp::Or => (key.to_owned(), op), + // Strip the operator suffix to get the attribute. + _ => (key.trim_end_matches(suffix).to_owned(), op), + }; } /// An `ObjectType` with `Hash` and `Eq` derived from the name. @@ -200,8 +208,8 @@ pub fn get_field<'a>( /// Returns the value type for a GraphQL field type. pub fn get_field_value_type(field_type: &s::Type) -> Result { match field_type { - s::Type::NamedType(ref name) => ValueType::from_str(&name), - s::Type::NonNullType(inner) => get_field_value_type(&inner), + s::Type::NamedType(ref name) => ValueType::from_str(name), + s::Type::NonNullType(inner) => get_field_value_type(inner), s::Type::ListType(_) => Err(anyhow!("Only scalar values are supported in this context")), } } diff --git a/graphql/src/store/prefetch.rs b/graphql/src/store/prefetch.rs index e480181338f..5a3a3a36534 100644 --- a/graphql/src/store/prefetch.rs +++ b/graphql/src/store/prefetch.rs @@ -5,7 +5,7 @@ use anyhow::{anyhow, Error}; use graph::constraint_violation; use graph::data::query::Trace; use graph::data::value::{Object, Word}; -use graph::prelude::{r, CacheWeight}; +use graph::prelude::{r, CacheWeight, CheapClone}; use graph::slog::warn; use graph::util::cache_weight; use lazy_static::lazy_static; @@ -17,10 +17,9 @@ use graph::{components::store::EntityType, data::graphql::*}; use graph::{ data::graphql::ext::DirectiveFinder, prelude::{ - s, ApiSchema, AttributeNames, BlockNumber, ChildMultiplicity, EntityCollection, - EntityFilter, EntityLink, EntityOrder, EntityWindow, Logger, ParentLink, - QueryExecutionError, QueryStore, StoreError, Value as StoreValue, WindowAttribute, - ENV_VARS, + s, ApiSchema, AttributeNames, ChildMultiplicity, EntityCollection, EntityFilter, + EntityLink, EntityOrder, EntityWindow, ParentLink, QueryExecutionError, StoreError, + Value as StoreValue, WindowAttribute, ENV_VARS, }, }; @@ -419,7 +418,7 @@ impl<'a> Join<'a> { // query are always joined first, and may then be overwritten by the merged selection // set under the object type condition. 
See also: e0d6da3e-60cf-41a5-b83c-b60a7a766d4a let values = parent.id().ok().and_then(|id| grouped.get(&*id).cloned()); - parent.set_children(response_key.to_owned(), values.unwrap_or(vec![])); + parent.set_children(response_key.to_owned(), values.unwrap_or_default()); } } @@ -485,16 +484,11 @@ pub fn run( ) -> Result<(r::Value, Trace), Vec> { execute_root_selection_set(resolver, ctx, selection_set).map(|(nodes, trace)| { graphql_metrics.observe_query_result_size(nodes.weight()); - let obj = Object::from_iter( - nodes + let obj = Object::from_iter(nodes.into_iter().flat_map(|node| { + node.children .into_iter() - .map(|node| { - node.children.into_iter().map(|(key, nodes)| { - (format!("prefetch:{}", key), node_list_as_value(nodes)) - }) - }) - .flatten(), - ); + .map(|(key, nodes)| (format!("prefetch:{}", key), node_list_as_value(nodes))) + })); (r::Value::Object(obj), trace) }) } @@ -505,7 +499,13 @@ fn execute_root_selection_set( ctx: &ExecutionContext, selection_set: &a::SelectionSet, ) -> Result<(Vec, Trace), Vec> { - let trace = Trace::root(ctx.query.query_text.clone()); + let trace = Trace::root( + &ctx.query.query_text, + &ctx.query.variables_text, + &ctx.query.query_id, + resolver.block_number(), + ctx.trace, + ); // Execute the root selection set against the root query type execute_selection_set(resolver, ctx, make_root_node(), trace, selection_set) } @@ -514,15 +514,15 @@ fn check_result_size<'a>( ctx: &'a ExecutionContext, size: usize, ) -> Result<(), QueryExecutionError> { + if size > ENV_VARS.graphql.warn_result_size { + warn!(ctx.logger, "Large query result"; "size" => size, "query_id" => &ctx.query.query_id); + } if size > ENV_VARS.graphql.error_result_size { return Err(QueryExecutionError::ResultTooBig( size, ENV_VARS.graphql.error_result_size, )); } - if size > ENV_VARS.graphql.warn_result_size { - warn!(ctx.logger, "Large query result"; "size" => size, "query_id" => &ctx.query.query_id); - } Ok(()) } @@ -642,18 +642,12 @@ fn execute_field( }; fetch( - ctx.logger.clone(), - resolver.store.as_ref(), + resolver, + ctx, parents, join, - ctx.query.schema.as_ref(), field, multiplicity, - ctx.query.schema.types_for_interface(), - resolver.block_number(), - ctx.max_first, - ctx.max_skip, - ctx.query.query_id.clone(), selected_attrs, ) .map_err(|e| vec![e]) @@ -663,31 +657,26 @@ fn execute_field( /// in which child field to look for the parent's id/join field. When /// `is_single` is `true`, there is at most one child per parent. 
fn fetch( - logger: Logger, - store: &(impl QueryStore + ?Sized), + resolver: &StoreResolver, + ctx: &ExecutionContext, parents: &[&mut Node], join: &Join<'_>, - schema: &ApiSchema, field: &a::Field, multiplicity: ChildMultiplicity, - types_for_interface: &BTreeMap>, - block: BlockNumber, - max_first: u32, - max_skip: u32, - query_id: String, selected_attrs: SelectedAttributes, ) -> Result<(Vec, Trace), QueryExecutionError> { let mut query = build_query( join.child_type, - block, + resolver.block_number(), field, - types_for_interface, - max_first, - max_skip, + ctx.query.schema.types_for_interface(), + ctx.max_first, + ctx.max_skip, selected_attrs, - schema, + &ctx.query.schema, )?; - query.query_id = Some(query_id); + query.trace = ctx.trace; + query.query_id = Some(ctx.query.query_id.clone()); if multiplicity == ChildMultiplicity::Single { // Suppress 'order by' in lookups of scalar values since @@ -695,7 +684,7 @@ fn fetch( query.order = EntityOrder::Unordered; } - query.logger = Some(logger); + query.logger = Some(ctx.logger.cheap_clone()); if let Some(r::Value::String(id)) = field.argument_value(ARG_ID.as_str()) { query.filter = Some( EntityFilter::Equal(ARG_ID.to_owned(), StoreValue::from(id.to_owned())) @@ -712,12 +701,15 @@ fn fetch( } query.collection = EntityCollection::Window(windows); } - store.find_query_values(query).map(|(values, trace)| { - ( - values.into_iter().map(|entity| entity.into()).collect(), - trace, - ) - }) + resolver + .store + .find_query_values(query) + .map(|(values, trace)| { + ( + values.into_iter().map(|entity| entity.into()).collect(), + trace, + ) + }) } #[derive(Debug, Default, Clone)] diff --git a/graphql/src/store/query.rs b/graphql/src/store/query.rs index e9b38e3017a..98f25e97dd5 100644 --- a/graphql/src/store/query.rs +++ b/graphql/src/store/query.rs @@ -9,7 +9,7 @@ use graph::prelude::*; use graph::{components::store::EntityType, data::graphql::ObjectOrInterface}; use crate::execution::ast as a; -use crate::schema::ast as sast; +use crate::schema::ast::{self as sast, FilterOp}; use super::prefetch::SelectedAttributes; @@ -53,15 +53,73 @@ pub(crate) fn build_query<'a>( query = query.filter(filter); } let order = match ( - build_order_by(entity, field)?, + build_order_by(entity, field, schema)?, build_order_direction(field)?, ) { - (Some((attr, value_type)), OrderDirection::Ascending) => { + (Some((attr, value_type, None)), OrderDirection::Ascending) => { EntityOrder::Ascending(attr, value_type) } - (Some((attr, value_type)), OrderDirection::Descending) => { + (Some((attr, value_type, None)), OrderDirection::Descending) => { EntityOrder::Descending(attr, value_type) } + (Some((attr, _, Some(child))), OrderDirection::Ascending) => { + if ENV_VARS.graphql.disable_child_sorting { + return Err(QueryExecutionError::NotSupported( + "Sorting by child attributes is not supported".to_string(), + )); + } + match child { + OrderByChild::Object(child) => { + EntityOrder::ChildAscending(EntityOrderByChild::Object( + EntityOrderByChildInfo { + sort_by_attribute: attr, + join_attribute: child.join_attribute, + derived: child.derived, + }, + child.entity_type, + )) + } + OrderByChild::Interface(child) => { + EntityOrder::ChildAscending(EntityOrderByChild::Interface( + EntityOrderByChildInfo { + sort_by_attribute: attr, + join_attribute: child.join_attribute, + derived: child.derived, + }, + child.entity_types, + )) + } + } + } + (Some((attr, _, Some(child))), OrderDirection::Descending) => { + if ENV_VARS.graphql.disable_child_sorting { + return 
Err(QueryExecutionError::NotSupported( + "Sorting by child attributes is not supported".to_string(), + )); + } + match child { + OrderByChild::Object(child) => { + EntityOrder::ChildDescending(EntityOrderByChild::Object( + EntityOrderByChildInfo { + sort_by_attribute: attr, + join_attribute: child.join_attribute, + derived: child.derived, + }, + child.entity_type, + )) + } + OrderByChild::Interface(child) => { + EntityOrder::ChildDescending(EntityOrderByChild::Interface( + EntityOrderByChildInfo { + sort_by_attribute: attr, + join_attribute: child.join_attribute, + derived: child.derived, + }, + child.entity_types, + )) + } + } + } (None, _) => EntityOrder::Default, }; query = query.order(order); @@ -118,7 +176,7 @@ fn build_filter( ) -> Result, QueryExecutionError> { match field.argument_value("where") { Some(r::Value::Object(object)) => match build_filter_from_object(entity, object, schema) { - Ok(filter) => Ok(Some(filter)), + Ok(filter) => Ok(Some(EntityFilter::And(filter))), Err(e) => Err(e), }, Some(r::Value::Null) => Ok(None), @@ -161,91 +219,165 @@ fn parse_change_block_filter(value: &r::Value) -> Result Result { + return match operation { + FilterOp::Not => Ok(EntityFilter::Not(field_name, store_value)), + FilterOp::GreaterThan => Ok(EntityFilter::GreaterThan(field_name, store_value)), + FilterOp::LessThan => Ok(EntityFilter::LessThan(field_name, store_value)), + FilterOp::GreaterOrEqual => Ok(EntityFilter::GreaterOrEqual(field_name, store_value)), + FilterOp::LessOrEqual => Ok(EntityFilter::LessOrEqual(field_name, store_value)), + FilterOp::In => Ok(EntityFilter::In( + field_name, + list_values(store_value, "_in")?, + )), + FilterOp::NotIn => Ok(EntityFilter::NotIn( + field_name, + list_values(store_value, "_not_in")?, + )), + FilterOp::Contains => Ok(EntityFilter::Contains(field_name, store_value)), + FilterOp::ContainsNoCase => Ok(EntityFilter::ContainsNoCase(field_name, store_value)), + FilterOp::NotContains => Ok(EntityFilter::NotContains(field_name, store_value)), + FilterOp::NotContainsNoCase => Ok(EntityFilter::NotContainsNoCase(field_name, store_value)), + FilterOp::StartsWith => Ok(EntityFilter::StartsWith(field_name, store_value)), + FilterOp::StartsWithNoCase => Ok(EntityFilter::StartsWithNoCase(field_name, store_value)), + FilterOp::NotStartsWith => Ok(EntityFilter::NotStartsWith(field_name, store_value)), + FilterOp::NotStartsWithNoCase => { + Ok(EntityFilter::NotStartsWithNoCase(field_name, store_value)) + } + FilterOp::EndsWith => Ok(EntityFilter::EndsWith(field_name, store_value)), + FilterOp::EndsWithNoCase => Ok(EntityFilter::EndsWithNoCase(field_name, store_value)), + FilterOp::NotEndsWith => Ok(EntityFilter::NotEndsWith(field_name, store_value)), + FilterOp::NotEndsWithNoCase => Ok(EntityFilter::NotEndsWithNoCase(field_name, store_value)), + FilterOp::Equal => Ok(EntityFilter::Equal(field_name, store_value)), + _ => unreachable!(), + }; +} + +/// Iterate over the list and generate an EntityFilter from it +fn build_list_filter_from_value( + entity: ObjectOrInterface, + schema: &ApiSchema, + value: &r::Value, +) -> Result, QueryExecutionError> { + // We have object like this + // { or: [{ name: \"John\", id: \"m1\" }, { mainBand: \"b2\" }] } + return match value { + r::Value::List(list) => Ok(list + .iter() + .map(|item| { + // It is each filter in the object + // { name: \"John\", id: \"m1\" } + // the fields within the object are ANDed together + return match item { + r::Value::Object(object) => Ok(EntityFilter::And(build_filter_from_object( + entity, object, 
schema, + )?)), + _ => Err(QueryExecutionError::InvalidFilterError), + }; + }) + .collect::, QueryExecutionError>>()?), + _ => Err(QueryExecutionError::InvalidFilterError), + }; +} + +/// build a filter which has list of nested filters +fn build_list_filter_from_object( + entity: ObjectOrInterface, + object: &Object, + schema: &ApiSchema, +) -> Result, QueryExecutionError> { + Ok(object + .iter() + .map(|(_, value)| { + return build_list_filter_from_value(entity, schema, value); + }) + .collect::>, QueryExecutionError>>()? + .into_iter() + // We iterate an object so all entity filters are flattened into one list + .flatten() + .collect::>()) +} + /// Parses a GraphQL input object into an EntityFilter, if present. fn build_filter_from_object( entity: ObjectOrInterface, object: &Object, schema: &ApiSchema, -) -> Result { - Ok(EntityFilter::And({ - object - .iter() - .map(|(key, value)| { - // Special handling for _change_block input filter since its not a - // standard entity filter that is based on entity structure/fields - if key == "_change_block" { - return match parse_change_block_filter(value) { - Ok(block_number) => Ok(EntityFilter::ChangeBlockGte(block_number)), - Err(e) => Err(e), - }; - } - - use self::sast::FilterOp::*; - let (field_name, op) = sast::parse_field_as_filter(key); +) -> Result, QueryExecutionError> { + Ok(object + .iter() + .map(|(key, value)| { + // Special handling for _change_block input filter since its not a + // standard entity filter that is based on entity structure/fields + if key == "_change_block" { + return match parse_change_block_filter(value) { + Ok(block_number) => Ok(EntityFilter::ChangeBlockGte(block_number)), + Err(e) => Err(e), + }; + } + use self::sast::FilterOp::*; + let (field_name, op) = sast::parse_field_as_filter(key); - let field = sast::get_field(entity, &field_name).ok_or_else(|| { - QueryExecutionError::EntityFieldError( - entity.name().to_owned(), - field_name.clone(), - ) - })?; + Ok(match op { + And => { + if ENV_VARS.graphql.disable_bool_filters { + return Err(QueryExecutionError::NotSupported( + "Boolean filters are not supported".to_string(), + )); + } - let ty = &field.field_type; + return Ok(EntityFilter::And(build_list_filter_from_object( + entity, object, schema, + )?)); + } + Or => { + if ENV_VARS.graphql.disable_bool_filters { + return Err(QueryExecutionError::NotSupported( + "Boolean filters are not supported".to_string(), + )); + } - Ok(match op { - Child => match value { - DataValue::Object(obj) => { - build_child_filter_from_object(entity, field_name, obj, schema)? - } - _ => { - return Err(QueryExecutionError::AttributeTypeError( - value.to_string(), - ty.to_string(), - )) - } - }, + return Ok(EntityFilter::Or(build_list_filter_from_object( + entity, object, schema, + )?)); + } + Child => match value { + DataValue::Object(obj) => { + build_child_filter_from_object(entity, field_name, obj, schema)? 
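The net effect of `build_filter_from_object` now returning a `Vec<EntityFilter>` (ANDed by its callers) while `build_list_filter_from_value` wraps each list entry in its own `EntityFilter::And` is easiest to see on a concrete input. A hedged sketch with simplified stand-in types (not graph-node's real `EntityFilter`), using the same example object quoted in the comment above:

```rust
// Simplified stand-ins that mirror the shape of the filter tree built here:
// fields inside one object are ANDed together, while an `or` key wraps the
// nested objects in an explicit OR node.
#[derive(Debug, PartialEq)]
enum Filter {
    And(Vec<Filter>),
    Or(Vec<Filter>),
    Equal(String, String), // (attribute, value)
}

/// `{ or: [{ name: "John", id: "m1" }, { mainBand: "b2" }] }` becomes:
fn example_or_tree() -> Filter {
    Filter::Or(vec![
        // the two fields of the first object are ANDed together
        Filter::And(vec![
            Filter::Equal("name".into(), "John".into()),
            Filter::Equal("id".into(), "m1".into()),
        ]),
        Filter::And(vec![Filter::Equal("mainBand".into(), "b2".into())]),
    ])
}

fn main() {
    println!("{:?}", example_or_tree());
}
```

This is also why `{ or: [{ name: "John", id: "m2" }] }` matches nothing in the tests further down: both conditions inside the single object must hold at once.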
+ } _ => { - let store_value = Value::from_query_value(value, ty)?; - - match op { - Not => EntityFilter::Not(field_name, store_value), - GreaterThan => EntityFilter::GreaterThan(field_name, store_value), - LessThan => EntityFilter::LessThan(field_name, store_value), - GreaterOrEqual => EntityFilter::GreaterOrEqual(field_name, store_value), - LessOrEqual => EntityFilter::LessOrEqual(field_name, store_value), - In => EntityFilter::In(field_name, list_values(store_value, "_in")?), - NotIn => EntityFilter::NotIn( - field_name, - list_values(store_value, "_not_in")?, - ), - Contains => EntityFilter::Contains(field_name, store_value), - ContainsNoCase => EntityFilter::ContainsNoCase(field_name, store_value), - NotContains => EntityFilter::NotContains(field_name, store_value), - NotContainsNoCase => { - EntityFilter::NotContainsNoCase(field_name, store_value) - } - StartsWith => EntityFilter::StartsWith(field_name, store_value), - StartsWithNoCase => { - EntityFilter::StartsWithNoCase(field_name, store_value) - } - NotStartsWith => EntityFilter::NotStartsWith(field_name, store_value), - NotStartsWithNoCase => { - EntityFilter::NotStartsWithNoCase(field_name, store_value) - } - EndsWith => EntityFilter::EndsWith(field_name, store_value), - EndsWithNoCase => EntityFilter::EndsWithNoCase(field_name, store_value), - NotEndsWith => EntityFilter::NotEndsWith(field_name, store_value), - NotEndsWithNoCase => { - EntityFilter::NotEndsWithNoCase(field_name, store_value) - } - Equal => EntityFilter::Equal(field_name, store_value), - _ => unreachable!(), - } + let field = sast::get_field(entity, &field_name).ok_or_else(|| { + QueryExecutionError::EntityFieldError( + entity.name().to_owned(), + field_name.clone(), + ) + })?; + let ty = &field.field_type; + return Err(QueryExecutionError::AttributeTypeError( + value.to_string(), + ty.to_string(), + )); } - }) + }, + _ => { + let field = sast::get_field(entity, &field_name).ok_or_else(|| { + QueryExecutionError::EntityFieldError( + entity.name().to_owned(), + field_name.clone(), + ) + })?; + let ty = &field.field_type; + let store_value = Value::from_query_value(value, ty)?; + return build_entity_filter(field_name, op, store_value); + } }) - .collect::, QueryExecutionError>>()? - })) + }) + .collect::, QueryExecutionError>>()?) } fn build_child_filter_from_object( @@ -261,21 +393,79 @@ fn build_child_filter_from_object( let child_entity = schema .object_or_interface(type_name) .ok_or(QueryExecutionError::InvalidFilterError)?; - let filter = build_filter_from_object(child_entity, object, schema)?; + let filter = Box::new(EntityFilter::And(build_filter_from_object( + child_entity, + object, + schema, + )?)); let derived = field.is_derived(); + let attr = match derived { + true => sast::get_derived_from_field(child_entity, field) + .ok_or(QueryExecutionError::InvalidFilterError)? + .name + .to_string(), + false => field_name.clone(), + }; - Ok(EntityFilter::Child(Child { - attr: match derived { - true => sast::get_derived_from_field(child_entity, field) - .ok_or(QueryExecutionError::InvalidFilterError)? - .name - .to_string(), - false => field_name, - }, - entity_type: EntityType::new(type_name.to_string()), - filter: Box::new(filter), - derived, - })) + if child_entity.is_interface() { + Ok(EntityFilter::Or( + child_entity + .object_types(schema.schema()) + .ok_or(QueryExecutionError::AbstractTypeError( + "Interface is not implemented by any types".to_string(), + ))? 
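Because the store can only join against concrete entity tables, a child filter whose target is an interface has to be expanded, as in the branch above, into one `Child` filter per implementing object type, combined with `Or`. A simplified sketch of just that expansion step (stand-in types, illustrative only):

```rust
// Stand-in for the per-type child filter produced for each implementor.
#[derive(Clone, Debug)]
struct ChildFilter {
    attr: String,
    entity_type: String,
}

// One child filter per object type implementing the interface; the caller
// would OR these together.
fn expand_interface(attr: &str, implementors: &[&str]) -> Vec<ChildFilter> {
    implementors
        .iter()
        .map(|ty| ChildFilter {
            attr: attr.to_string(),
            entity_type: ty.to_string(),
        })
        .collect()
}

fn main() {
    // e.g. a filter on `author` where the interface is implemented by
    // SongReview and BandReview, as in the test schema below
    let ors = expand_interface("author", &["SongReview", "BandReview"]);
    for c in &ors {
        println!("Child filter on `{}` against {}", c.attr, c.entity_type);
    }
}
```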
+ .iter() + .map(|object_type| { + EntityFilter::Child(Child { + attr: attr.clone(), + entity_type: EntityType::new(object_type.name.to_string()), + filter: filter.clone(), + derived, + }) + }) + .collect(), + )) + } else if entity.is_interface() { + Ok(EntityFilter::Or( + entity + .object_types(schema.schema()) + .ok_or(QueryExecutionError::AbstractTypeError( + "Interface is not implemented by any types".to_string(), + ))? + .iter() + .map(|object_type| { + let field = object_type + .fields + .iter() + .find(|f| f.name == field_name.clone()) + .ok_or(QueryExecutionError::InvalidFilterError)?; + let derived = field.is_derived(); + + let attr = match derived { + true => sast::get_derived_from_field(child_entity, field) + .ok_or(QueryExecutionError::InvalidFilterError)? + .name + .to_string(), + false => field_name.clone(), + }; + + Ok(EntityFilter::Child(Child { + attr: attr.clone(), + entity_type: EntityType::new(child_entity.name().to_string()), + filter: filter.clone(), + derived, + })) + }) + .collect::, QueryExecutionError>>()?, + )) + } else { + Ok(EntityFilter::Child(Child { + attr, + entity_type: EntityType::new(type_name.to_string()), + filter, + derived, + })) + } } /// Parses a list of GraphQL values into a vector of entity field values. @@ -306,27 +496,150 @@ fn list_values(value: Value, filter_type: &str) -> Result, QueryExecu } } +enum OrderByValue { + Direct(String), + Child(String, String), +} + +fn parse_order_by(enum_value: &String) -> Result { + let mut parts = enum_value.split("__"); + let first = parts.next().ok_or_else(|| { + QueryExecutionError::ValueParseError( + "Invalid order value".to_string(), + enum_value.to_string(), + ) + })?; + let second = parts.next(); + + Ok(match second { + Some(second) => OrderByValue::Child(first.to_string(), second.to_string()), + None => OrderByValue::Direct(first.to_string()), + }) +} + +struct ObjectOrderDetails { + entity_type: EntityType, + join_attribute: Attribute, + derived: bool, +} + +struct InterfaceOrderDetails { + entity_types: Vec, + join_attribute: Attribute, + derived: bool, +} + +enum OrderByChild { + Object(ObjectOrderDetails), + Interface(InterfaceOrderDetails), +} + /// Parses GraphQL arguments into an field name to order by, if present. fn build_order_by( entity: ObjectOrInterface, field: &a::Field, -) -> Result, QueryExecutionError> { + schema: &ApiSchema, +) -> Result)>, QueryExecutionError> { match field.argument_value("orderBy") { - Some(r::Value::Enum(name)) => { - let field = sast::get_field(entity, name).ok_or_else(|| { - QueryExecutionError::EntityFieldError(entity.name().to_owned(), name.clone()) - })?; - sast::get_field_value_type(&field.field_type) - .map(|value_type| Some((name.to_owned(), value_type))) - .map_err(|_| { - QueryExecutionError::OrderByNotSupportedError( + Some(r::Value::Enum(name)) => match parse_order_by(name)? 
{ + OrderByValue::Direct(name) => { + let field = sast::get_field(entity, name.as_str()).ok_or_else(|| { + QueryExecutionError::EntityFieldError(entity.name().to_owned(), name.clone()) + })?; + sast::get_field_value_type(&field.field_type) + .map(|value_type| Some((name.to_owned(), value_type, None))) + .map_err(|_| { + QueryExecutionError::OrderByNotSupportedError( + entity.name().to_owned(), + name.clone(), + ) + }) + } + OrderByValue::Child(parent_field_name, child_field_name) => { + if entity.is_interface() { + return Err(QueryExecutionError::OrderByNotSupportedError( entity.name().to_owned(), - name.clone(), - ) - }) - } + parent_field_name.clone(), + )); + } + + let field = + sast::get_field(entity, parent_field_name.as_str()).ok_or_else(|| { + QueryExecutionError::EntityFieldError( + entity.name().to_owned(), + parent_field_name.clone(), + ) + })?; + let derived = field.is_derived(); + let base_type = field.field_type.get_base_type(); + let child_entity = schema + .object_or_interface(base_type) + .ok_or_else(|| QueryExecutionError::NamedTypeError(base_type.into()))?; + let child_field = sast::get_field(child_entity, child_field_name.as_str()) + .ok_or_else(|| { + QueryExecutionError::EntityFieldError( + child_entity.name().to_owned(), + child_field_name.clone(), + ) + })?; + + let join_attribute = match derived { + true => sast::get_derived_from_field(child_entity, field) + .ok_or_else(|| { + QueryExecutionError::EntityFieldError( + entity.name().to_string(), + field.name.to_string(), + ) + })? + .name + .to_string(), + false => parent_field_name, + }; + + let child = match child_entity { + ObjectOrInterface::Object(_) => OrderByChild::Object(ObjectOrderDetails { + entity_type: EntityType::new(base_type.into()), + join_attribute, + derived, + }), + ObjectOrInterface::Interface(interface) => { + let entity_types = schema + .types_for_interface() + .get(&EntityType::new(interface.name.to_string())) + .map(|object_types| { + object_types + .iter() + .map(|object_type| EntityType::new(object_type.name.clone())) + .collect::>() + }) + .ok_or(QueryExecutionError::AbstractTypeError( + "Interface not implemented by any object type".to_string(), + ))?; + OrderByChild::Interface(InterfaceOrderDetails { + entity_types, + join_attribute, + derived, + }) + } + }; + + sast::get_field_value_type(&child_field.field_type) + .map(|value_type| Some((child_field_name.to_owned(), value_type, Some(child)))) + .map_err(|_| { + QueryExecutionError::OrderByNotSupportedError( + child_entity.name().to_owned(), + child_field_name.clone(), + ) + }) + } + }, _ => match field.argument_value("text") { - Some(r::Value::Object(filter)) => build_fulltext_order_by_from_object(filter), + Some(r::Value::Object(filter)) => { + build_fulltext_order_by_from_object(filter).map(|order_by| match order_by { + Some((attr, value)) => Some((attr, value, None)), + None => None, + }) + } None => Ok(None), _ => Err(QueryExecutionError::InvalidFilterError), }, diff --git a/graphql/src/subscription/mod.rs b/graphql/src/subscription/mod.rs index c07bf27d209..ebb597a83e2 100644 --- a/graphql/src/subscription/mod.rs +++ b/graphql/src/subscription/mod.rs @@ -97,6 +97,7 @@ fn create_source_event_stream( max_first: options.max_first, max_skip: options.max_skip, cache_status: Default::default(), + trace: ENV_VARS.log_sql_timing(), }; let subscription_type = ctx @@ -222,6 +223,7 @@ async fn execute_subscription_event( max_first, max_skip, cache_status: Default::default(), + trace: ENV_VARS.log_sql_timing(), }); let subscription_type = match 
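The `orderBy: mainBand__name` syntax handled above hinges on the double-underscore convention implemented by `parse_order_by`. A minimal sketch of just that parsing step (assumed function name, returning a tuple instead of the `OrderByValue` enum):

```rust
// Mirrors the convention used by `parse_order_by`: everything before the
// first `__` names the parent field, everything after it names the child
// field to sort by. Illustrative stand-in, not the real function.
fn split_order_by(value: &str) -> (String, Option<String>) {
    let mut parts = value.split("__");
    let first = parts.next().unwrap_or_default().to_string();
    (first, parts.next().map(str::to_string))
}

fn main() {
    assert_eq!(split_order_by("name"), ("name".to_string(), None));
    assert_eq!(
        split_order_by("mainBand__name"),
        ("mainBand".to_string(), Some("name".to_string()))
    );
}
```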
ctx.query.schema.subscription_type.as_ref() { diff --git a/graphql/tests/introspection.rs b/graphql/tests/introspection.rs index 10566842e10..ab2360e2567 100644 --- a/graphql/tests/introspection.rs +++ b/graphql/tests/introspection.rs @@ -560,6 +560,7 @@ async fn introspection_query(schema: Schema, query: &str) -> QueryResult { let query = Query::new( graphql_parser::parse_query(query).unwrap().into_static(), None, + false, ); // Execute it @@ -570,6 +571,7 @@ async fn introspection_query(schema: Schema, query: &str) -> QueryResult { max_first: std::u32::MAX, max_skip: std::u32::MAX, load_manager: LOAD_MANAGER.clone(), + trace: false, }; let schema = Arc::new(ApiSchema::from_api_schema(schema).unwrap()); diff --git a/graphql/tests/query.rs b/graphql/tests/query.rs index 854f8782dd6..c4974b9728e 100644 --- a/graphql/tests/query.rs +++ b/graphql/tests/query.rs @@ -1,7 +1,7 @@ #[macro_use] extern crate pretty_assertions; -use graph::components::store::{EntityKey, EntityType}; +use graph::components::store::EntityKey; use graph::data::subgraph::schema::DeploymentCreate; use graph::entity; use graph::prelude::SubscriptionResult; @@ -34,14 +34,16 @@ use graph::{ }; use graph_graphql::{prelude::*, subscription::execute_subscription}; use test_store::{ - deployment_state, execute_subgraph_query_with_deadline, graphql_metrics, revert_block, - run_test_sequentially, transact_errors, Store, BLOCK_ONE, GENESIS_PTR, LOAD_MANAGER, LOGGER, - METRICS_REGISTRY, STORE, SUBSCRIPTION_MANAGER, + deployment_state, execute_subgraph_query, execute_subgraph_query_with_deadline, + graphql_metrics, revert_block, run_test_sequentially, transact_errors, Store, BLOCK_ONE, + GENESIS_PTR, LOAD_MANAGER, LOGGER, METRICS_REGISTRY, STORE, SUBSCRIPTION_MANAGER, }; const NETWORK_NAME: &str = "fake_network"; const SONGS_STRING: [&str; 5] = ["s0", "s1", "s2", "s3", "s4"]; const SONGS_BYTES: [&str; 5] = ["0xf0", "0xf1", "0xf2", "0xf3", "0xf4"]; +const MEDIA_STRING: [&str; 7] = ["md0", "md1", "md2", "md3", "md4", "md5", "md6"]; +const MEDIA_BYTES: [&str; 7] = ["0xf0", "0xf1", "0xf2", "0xf3", "0xf4", "0xf5", "0xf6"]; #[derive(Clone, Copy, Debug)] enum IdType { @@ -58,6 +60,13 @@ impl IdType { } } + fn medias(&self) -> &[&str] { + match self { + IdType::String => MEDIA_STRING.as_slice(), + IdType::Bytes => MEDIA_BYTES.as_slice(), + } + } + fn as_str(&self) -> &str { match self { IdType::String => "String", @@ -148,22 +157,27 @@ fn test_schema(id: DeploymentHash, id_type: IdType) -> Schema { name: String! mainBand: Band bands: [Band!]! - writtenSongs: [Song]! @derivedFrom(field: \"writtenBy\") + writtenSongs: [Song!]! @derivedFrom(field: \"writtenBy\") } type Band @entity { id: ID! name: String! members: [Musician!]! @derivedFrom(field: \"bands\") + reviews: [BandReview!]! @derivedFrom(field: \"band\") originalSongs: [Song!]! } type Song @entity { id: @ID@! + sid: String! title: String! writtenBy: Musician! publisher: Publisher! band: Band @derivedFrom(field: \"originalSongs\") + reviews: [SongReview!]! @derivedFrom(field: \"song\") + media: [Media!]! + release: Release! @derivedFrom(field: \"songs\") } type SongStat @entity { @@ -175,6 +189,90 @@ fn test_schema(id: DeploymentHash, id_type: IdType) -> Schema { type Publisher { id: Bytes! } + + interface Review @entity { + id: ID! + body: String! + author: Author! + } + + type SongReview implements Review @entity { + id: ID! + body: String! + song: Song + author: Author! + } + + type BandReview implements Review @entity { + id: ID! + body: String! + band: Band + author: Author! 
+ } + + interface Media { + id: ID! + title: String! + song: Song! + author: User! + } + + type Photo implements Media @entity { + id: ID! + title: String! + song: Song! @derivedFrom(field: \"media\") + author: User! + } + + type Video implements Media @entity { + id: ID! + title: String! + song: Song! @derivedFrom(field: \"media\") + author: User! + } + + interface Release { + id: ID! + title: String! + songs: [Song!]! + } + + type Single implements Release @entity { + id: ID! + title: String! + # It could be a single song + # but let's say a Single represents one song + bonus tracks + songs: [Song!]! + } + + type Album implements Release @entity { + id: ID! + title: String! + songs: [Song!]! + } + + interface Author { + id: ID! + name: String! + } + + type User implements Author @entity { + id: ID! + name: String! + reviews: [Review!]! @derivedFrom(field: \"author\") + bandReviews: [BandReview!]! @derivedFrom(field: \"author\") + songReviews: [SongReview!]! @derivedFrom(field: \"author\") + latestSongReview: SongReview! + latestBandReview: BandReview! + latestReview: Review! + medias: [Media!]! @derivedFrom(field: \"author\") + } + + type AnonymousUser implements Author @entity { + id: ID! + name: String! + reviews: [Review!]! @derivedFrom(field: \"author\") + } "; Schema::parse(&SCHEMA.replace("@ID@", id_type.as_str()), id).expect("Test schema invalid") @@ -185,7 +283,7 @@ async fn insert_test_entities( manifest: SubgraphManifest, id_type: IdType, ) -> DeploymentLocator { - let deployment = DeploymentCreate::new(&manifest, None); + let deployment = DeploymentCreate::new(String::new(), &manifest, None); let name = SubgraphName::new(manifest.id.as_str()).unwrap(); let node_id = NodeId::new("test").unwrap(); let deployment = store @@ -200,18 +298,37 @@ async fn insert_test_entities( .unwrap(); let s = id_type.songs(); + let md = id_type.medias(); let entities0 = vec![ entity! { __typename: "Musician", id: "m1", name: "John", mainBand: "b1", bands: vec!["b1", "b2"] }, entity! { __typename: "Musician", id: "m2", name: "Lisa", mainBand: "b1", bands: vec!["b1"] }, entity! { __typename: "Publisher", id: "0xb1" }, entity! { __typename: "Band", id: "b1", name: "The Musicians", originalSongs: vec![s[1], s[2]] }, entity! { __typename: "Band", id: "b2", name: "The Amateurs", originalSongs: vec![s[1], s[3], s[4]] }, - entity! { __typename: "Song", id: s[1], title: "Cheesy Tune", publisher: "0xb1", writtenBy: "m1" }, - entity! { __typename: "Song", id: s[2], title: "Rock Tune", publisher: "0xb1", writtenBy: "m2" }, - entity! { __typename: "Song", id: s[3], title: "Pop Tune", publisher: "0xb1", writtenBy: "m1" }, - entity! { __typename: "Song", id: s[4], title: "Folk Tune", publisher: "0xb1", writtenBy: "m3" }, + entity! { __typename: "Song", id: s[1], sid: "s1", title: "Cheesy Tune", publisher: "0xb1", writtenBy: "m1", media: vec![md[1], md[2]] }, + entity! { __typename: "Song", id: s[2], sid: "s2", title: "Rock Tune", publisher: "0xb1", writtenBy: "m2", media: vec![md[3], md[4]] }, + entity! { __typename: "Song", id: s[3], sid: "s3", title: "Pop Tune", publisher: "0xb1", writtenBy: "m1", media: vec![md[5]] }, + entity! { __typename: "Song", id: s[4], sid: "s4", title: "Folk Tune", publisher: "0xb1", writtenBy: "m3", media: vec![md[6]] }, entity! { __typename: "SongStat", id: s[1], played: 10 }, entity! { __typename: "SongStat", id: s[2], played: 15 }, + entity! { __typename: "BandReview", id: "r1", body: "Bad musicians", band: "b1", author: "u1" }, + entity! 
{ __typename: "BandReview", id: "r2", body: "Good amateurs", band: "b2", author: "u2" }, + entity! { __typename: "BandReview", id: "r5", body: "Very Bad musicians", band: "b1", author: "u3" }, + entity! { __typename: "SongReview", id: "r3", body: "Bad", song: s[2], author: "u1" }, + entity! { __typename: "SongReview", id: "r4", body: "Good", song: s[3], author: "u2" }, + entity! { __typename: "SongReview", id: "r6", body: "Very Bad", song: s[2], author: "u3" }, + entity! { __typename: "User", id: "u1", name: "Baden", latestSongReview: "r3", latestBandReview: "r1", latestReview: "r1" }, + entity! { __typename: "User", id: "u2", name: "Goodwill", latestSongReview: "r4", latestBandReview: "r2", latestReview: "r2" }, + entity! { __typename: "AnonymousUser", id: "u3", name: "Anonymous 3", latestSongReview: "r6", latestBandReview: "r5", latestReview: "r5" }, + entity! { __typename: "Photo", id: md[1], title: "Cheesy Tune Single Cover", author: "u1" }, + entity! { __typename: "Video", id: md[2], title: "Cheesy Tune Music Video", author: "u2" }, + entity! { __typename: "Photo", id: md[3], title: "Rock Tune Single Cover", author: "u1" }, + entity! { __typename: "Video", id: md[4], title: "Rock Tune Music Video", author: "u2" }, + entity! { __typename: "Photo", id: md[5], title: "Pop Tune Single Cover", author: "u1" }, + entity! { __typename: "Video", id: md[6], title: "Folk Tune Music Video", author: "u2" }, + entity! { __typename: "Album", id: "rl1", title: "Pop and Folk", songs: vec![s[3], s[4]] }, + entity! { __typename: "Single", id: "rl2", title: "Rock", songs: vec![s[2]] }, + entity! { __typename: "Single", id: "rl3", title: "Cheesy", songs: vec![s[1]] }, ]; let entities1 = vec![ @@ -221,12 +338,10 @@ async fn insert_test_entities( async fn insert_at(entities: Vec, deployment: &DeploymentLocator, block_ptr: BlockPtr) { let insert_ops = entities.into_iter().map(|data| EntityOperation::Set { - key: EntityKey { - entity_type: EntityType::new( - data.get("__typename").unwrap().clone().as_string().unwrap(), - ), - entity_id: data.get("id").unwrap().clone().as_string().unwrap().into(), - }, + key: EntityKey::data( + data.get("__typename").unwrap().clone().as_string().unwrap(), + data.get("id").unwrap().clone().as_string().unwrap(), + ), data, }); @@ -265,7 +380,7 @@ async fn execute_query_document_with_variables( METRICS_REGISTRY.clone(), )); let target = QueryTarget::Deployment(id.clone(), Default::default()); - let query = Query::new(query, variables); + let query = Query::new(query, variables, false); runner .run_query_with_complexity(query, target, None, None, None, None) @@ -305,6 +420,16 @@ impl From<&str> for QueryArgs { } } +impl From for QueryArgs { + fn from(query: String) -> Self { + QueryArgs { + query, + variables: None, + max_complexity: None, + } + } +} + impl From<(&str, r::Value)> for QueryArgs { fn from((query, vars): (&str, r::Value)) -> Self { let vars = match vars { @@ -366,7 +491,7 @@ where METRICS_REGISTRY.clone(), )); let target = QueryTarget::Deployment(id.clone(), Default::default()); - let query = Query::new(query, variables); + let query = Query::new(query, variables, false); runner .run_query_with_complexity(query, target, max_complexity, None, None, None) @@ -399,6 +524,7 @@ async fn run_subscription( let query = Query::new( graphql_parser::parse_query(query).unwrap().into_static(), None, + false, ); let options = SubscriptionExecutionOptions { logger: logger.clone(), @@ -468,44 +594,513 @@ fn can_query_one_to_one_relationship() { } #[test] -fn 
can_query_one_to_many_relationships_in_both_directions() { +fn can_query_one_to_many_relationships_in_both_directions() { + const QUERY: &str = " + query { + musicians(first: 100, orderBy: id) { + name + writtenSongs(first: 100, orderBy: id) { + title + writtenBy { name } + } + } + }"; + + run_query(QUERY, |result, _| { + fn song(title: &str, author: &str) -> r::Value { + object! { + title: title, + writtenBy: object! { name: author } + } + } + + let exp = object! { + musicians: vec![ + object! { + name: "John", + writtenSongs: vec![ + song("Cheesy Tune", "John"), + song("Pop Tune", "John"), + ] + }, + object! { + name: "Lisa", writtenSongs: vec![ song("Rock Tune", "Lisa") ] + }, + object! { + name: "Tom", writtenSongs: vec![ song("Folk Tune", "Tom") ] + }, + object! { + name: "Valerie", writtenSongs: Vec::::new() + }, + ] + }; + + let data = extract_data!(result).unwrap(); + assert_eq!(data, exp); + }) +} + +#[test] +fn can_query_many_to_many_relationship() { + const QUERY: &str = " + query { + musicians(first: 100, orderBy: id) { + name + bands(first: 100, orderBy: id) { + name + members(first: 100, orderBy: id) { + name + } + } + } + }"; + + run_query(QUERY, |result, _| { + fn members(names: Vec<&str>) -> Vec { + names + .into_iter() + .map(|name| object! { name: name }) + .collect() + } + + let the_musicians = object! { + name: "The Musicians", + members: members(vec!["John", "Lisa", "Tom"]) + }; + + let the_amateurs = object! { + name: "The Amateurs", + members: members(vec![ "John", "Tom" ]) + }; + + let exp = object! { + musicians: vec![ + object! { name: "John", bands: vec![ the_musicians.clone(), the_amateurs.clone() ]}, + object! { name: "Lisa", bands: vec![ the_musicians.clone() ] }, + object! { name: "Tom", bands: vec![ the_musicians.clone(), the_amateurs.clone() ] }, + object! { name: "Valerie", bands: Vec::::new() } + ] + }; + + let data = extract_data!(result).unwrap(); + assert_eq!(data, exp); + }) +} + +#[test] +fn can_query_with_sorting_by_child_entity() { + const QUERY: &str = " + query { + desc: musicians(first: 100, orderBy: mainBand__name, orderDirection: desc) { + name + mainBand { + name + } + } + asc: musicians(first: 100, orderBy: mainBand__name, orderDirection: asc) { + name + mainBand { + name + } + } + }"; + + run_query(QUERY, |result, _| { + let exp = object! { + desc: vec![ + object! { name: "Valerie", mainBand: r::Value::Null }, + object! { name: "Lisa", mainBand: object! { name: "The Musicians" } }, + object! { name: "John", mainBand: object! { name: "The Musicians" } }, + object! { name: "Tom", mainBand: object! { name: "The Amateurs"} }, + ], + asc: vec![ + object! { name: "Tom", mainBand: object! { name: "The Amateurs"} }, + object! { name: "John", mainBand: object! { name: "The Musicians" } }, + object! { name: "Lisa", mainBand: object! { name: "The Musicians" } }, + object! { name: "Valerie", mainBand: r::Value::Null }, + ] + }; + + let data = extract_data!(result).unwrap(); + assert_eq!(data, exp); + }) +} + +#[test] +fn can_query_with_sorting_by_derived_child_entity() { + const QUERY: &str = " + query { + desc: songStats(first: 100, orderBy: song__title, orderDirection: desc) { + id + song { + id + title + } + played + } + asc: songStats(first: 100, orderBy: song__title, orderDirection: asc) { + id + song { + id + title + } + played + } + }"; + + run_query(QUERY, |result, id_type| { + let s = id_type.songs(); + let exp = object! { + desc: vec![ + object! { + id: s[2], + song: object! { id: s[2], title: "Rock Tune" }, + played: 15 + }, + object! 
{ + id: s[1], + song: object! { id: s[1], title: "Cheesy Tune" }, + played: 10, + } + ], + asc: vec![ + object! { + id: s[1], + song: object! { id: s[1], title: "Cheesy Tune" }, + played: 10, + }, + object! { + id: s[2], + song: object! { id: s[2], title: "Rock Tune" }, + played: 15 + } + ] + }; + + let data = extract_data!(result).unwrap(); + assert_eq!(data, exp); + }) +} + +#[test] +fn can_query_with_sorting_by_child_entity_id() { + const QUERY: &str = " + query { + desc: bandReviews(first: 100, orderBy: author__id, orderDirection: desc) { + body + author { + name + } + } + asc: bandReviews(first: 100, orderBy: author__id, orderDirection: asc) { + body + author { + name + } + } + }"; + + run_query(QUERY, |result, _| { + let exp = object! { + desc: vec![ + object! { body: "Very Bad musicians", author: object! { name: "Anonymous 3" } }, + object! { body: "Good amateurs", author: object! { name: "Goodwill" } }, + object! { body: "Bad musicians", author: object! { name: "Baden" } }, + ], + asc: vec![ + object! { body: "Bad musicians", author: object! { name: "Baden" } }, + object! { body: "Good amateurs", author: object! { name: "Goodwill" } }, + object! { body: "Very Bad musicians", author: object! { name: "Anonymous 3" } }, + ] + }; + + let data = extract_data!(result).unwrap(); + assert_eq!(data, exp); + }) +} + +#[test] +fn can_query_with_sorting_by_derived_child_entity_id() { + const QUERY: &str = " + query { + desc: songStats(first: 100, orderBy: song__id, orderDirection: desc) { + id + song { + id + title + } + played + } + asc: songStats(first: 100, orderBy: song__id, orderDirection: asc) { + id + song { + id + title + } + played + } + }"; + + run_query(QUERY, |result, id_type| { + let s = id_type.songs(); + let exp = object! { + desc: vec![ + object! { + id: s[2], + song: object! { id: s[2], title: "Rock Tune" }, + played: 15 + }, + object! { + id: s[1], + song: object! { id: s[1], title: "Cheesy Tune" }, + played: 10, + } + ], + asc: vec![ + object! { + id: s[1], + song: object! { id: s[1], title: "Cheesy Tune" }, + played: 10, + }, + object! { + id: s[2], + song: object! { id: s[2], title: "Rock Tune" }, + played: 15 + } + ] + }; + + let data = extract_data!(result).unwrap(); + assert_eq!(data, exp); + }) +} + +#[test] +fn can_query_with_sorting_by_child_interface() { + const QUERY: &str = " + query { + desc: songReviews(first: 100, orderBy: author__name, orderDirection: desc) { + body + author { + name + } + } + asc: songReviews(first: 100, orderBy: author__name, orderDirection: asc) { + body + author { + name + } + } + }"; + + run_query(QUERY, |result, _| { + let exp = object! { + desc: vec![ + object! { body: "Good", author: object! { name: "Goodwill" } }, + object! { body: "Bad", author: object! { name: "Baden" } }, + object! { body: "Very Bad", author: object! { name: "Anonymous 3" } }, + ], + asc: vec![ + object! { body: "Very Bad", author: object! { name: "Anonymous 3" } }, + object! { body: "Bad", author: object! { name: "Baden" } }, + object! { body: "Good", author: object! 
{ name: "Goodwill" } }, + ] + }; + + let data = extract_data!(result).unwrap(); + assert_eq!(data, exp); + }) +} + +#[test] +fn can_not_query_interface_with_sorting_by_child_entity() { + const QUERY: &str = " + query { + desc: medias(first: 100, orderBy: author__name, orderDirection: desc) { + title + author { + name + } + } + asc: medias(first: 100, orderBy: author__name, orderDirection: asc) { + title + author { + name + } + } + }"; + + run_query(QUERY, |result, _| { + // Sorting an interface by child-level entity (derived) is not supported + assert!(result.has_errors()); + }); +} + +#[test] +fn can_not_query_interface_with_sorting_by_derived_child_entity() { + const QUERY: &str = " + query { + desc: medias(first: 100, orderBy: song__title, orderDirection: desc) { + title + song { + title + } + } + asc: medias(first: 100, orderBy: song__title, orderDirection: asc) { + title + song { + title + } + } + }"; + + run_query(QUERY, |result, _| { + // Sorting an interface by child-level entity is not supported + assert!(result.has_errors()); + }); +} + +#[test] +fn can_query_with_child_filter_on_list_type_field() { + const QUERY: &str = " + query { + musicians(first: 100, orderBy: id, where: { bands_: { name: \"The Amateurs\" } }) { + name + bands(first: 100, orderBy: id) { + name + } + } + }"; + + run_query(QUERY, |result, _| { + let the_musicians = object! { + name: "The Musicians", + }; + + let the_amateurs = object! { + name: "The Amateurs", + }; + + let exp = object! { + musicians: vec![ + object! { name: "John", bands: vec![ the_musicians.clone(), the_amateurs.clone() ]}, + object! { name: "Tom", bands: vec![ the_musicians.clone(), the_amateurs.clone() ] }, + ] + }; + + let data = extract_data!(result).unwrap(); + assert_eq!(data, exp); + }) +} + +#[test] +fn can_query_with_child_filter_on_derived_list_type_field() { + const QUERY: &str = " + query { + musicians(first: 100, orderBy: id, where: { writtenSongs_: { title_contains: \"Rock\" } }) { + name + } + }"; + + run_query(QUERY, |result, _| { + let exp = object! { + musicians: vec![ + object! { name: "Lisa" }, + ] + }; + + let data = extract_data!(result).unwrap(); + assert_eq!(data, exp); + }) +} + +#[test] +fn can_query_with_child_filter_on_named_type_field() { + const QUERY: &str = " + query { + musicians(first: 100, orderBy: id, where: { mainBand_: { name_contains: \"The Amateurs\" } }) { + name + mainBand { + id + } + } + }"; + + run_query(QUERY, |result, _| { + let exp = object! { + musicians: vec![ + object! { name: "Tom", mainBand: object! { id: "b2"} } + ] + }; + + let data = extract_data!(result).unwrap(); + assert_eq!(data, exp); + }) +} + +#[test] +fn can_query_with_child_filter_on_derived_named_type_field() { + const QUERY: &str = " + query { + songs(first: 100, orderBy: id, where: { band_: { name_contains: \"The Musicians\" } }) { + title + } + }"; + + run_query(QUERY, |result, _| { + let exp = object! { + songs: vec![ + object! { title: "Cheesy Tune" }, + object! { title: "Rock Tune" }, + ] + }; + + let data = extract_data!(result).unwrap(); + assert_eq!(data, exp); + }) +} + +#[test] +fn can_query_an_interface_with_child_filter_on_named_type_field() { + const QUERY: &str = " + query { + reviews(first: 100, orderBy: id, where: { author_: { name_starts_with: \"Good\" } }) { + body + author { + name + } + } + }"; + + run_query(QUERY, |result, _| { + let exp = object! { + reviews: vec![ + object! { body: "Good amateurs", author: object! { name: "Goodwill" } }, + object! { body: "Good", author: object! 
{ name: "Goodwill" } }, + ] + }; + + let data = extract_data!(result).unwrap(); + assert_eq!(data, exp); + }) +} + +#[test] +fn can_query_with_child_filter_on_derived_interface_list_field() { const QUERY: &str = " query { - musicians(first: 100, orderBy: id) { + users(first: 100, orderBy: id, where: { reviews_: { body_starts_with: \"Good\" } }) { name - writtenSongs(first: 100, orderBy: id) { - title - writtenBy { name } + reviews { + body } } }"; run_query(QUERY, |result, _| { - fn song(title: &str, author: &str) -> r::Value { - object! { - title: title, - writtenBy: object! { name: author } - } - } - let exp = object! { - musicians: vec![ - object! { - name: "John", - writtenSongs: vec![ - song("Cheesy Tune", "John"), - song("Pop Tune", "John"), - ] - }, - object! { - name: "Lisa", writtenSongs: vec![ song("Rock Tune", "Lisa") ] - }, - object! { - name: "Tom", writtenSongs: vec![ song("Folk Tune", "Tom") ] - }, - object! { - name: "Valerie", writtenSongs: Vec::::new() - }, + users: vec![ + object! { name: "Goodwill", reviews: vec![ object! { body: "Good amateurs" }, object! { body: "Good" } ] }, ] }; @@ -515,44 +1110,45 @@ fn can_query_one_to_many_relationships_in_both_directions() { } #[test] -fn can_query_many_to_many_relationship() { +fn can_query_entity_by_child_entity_field() { const QUERY: &str = " query { - musicians(first: 100, orderBy: id) { + users(first: 100, orderBy: id, where: { latestSongReview_: { body_starts_with: \"Good\" } }) { name - bands(first: 100, orderBy: id) { - name - members(first: 100, orderBy: id) { - name - } + latestSongReview { + body } } }"; run_query(QUERY, |result, _| { - fn members(names: Vec<&str>) -> Vec { - names - .into_iter() - .map(|name| object! { name: name }) - .collect() - } - - let the_musicians = object! { - name: "The Musicians", - members: members(vec!["John", "Lisa", "Tom"]) + let exp = object! { + users: vec![ + object! { name: "Goodwill", latestSongReview: object! { body: "Good" } }, + ] }; - let the_amateurs = object! { - name: "The Amateurs", - members: members(vec![ "John", "Tom" ]) - }; + let data = extract_data!(result).unwrap(); + assert_eq!(data, exp); + }) +} + +#[test] +fn can_query_entity_by_child_interface_field() { + const QUERY: &str = " + query { + users(first: 100, orderBy: id, where: { latestReview_: { body_starts_with: \"Good\" } }) { + name + latestReview { + body + } + } + }"; + run_query(QUERY, |result, _| { let exp = object! { - musicians: vec![ - object! { name: "John", bands: vec![ the_musicians.clone(), the_amateurs.clone() ]}, - object! { name: "Lisa", bands: vec![ the_musicians.clone() ] }, - object! { name: "Tom", bands: vec![ the_musicians.clone(), the_amateurs.clone() ] }, - object! { name: "Valerie", bands: Vec::::new() } + users: vec![ + object! { name: "Goodwill", latestReview: object! { body: "Good amateurs" } }, ] }; @@ -562,30 +1158,47 @@ fn can_query_many_to_many_relationship() { } #[test] -fn can_query_with_child_filter_on_list_type_field() { +fn can_query_interface_by_child_entity_field() { const QUERY: &str = " query { - musicians(first: 100, orderBy: id, where: { bands_: { name: \"The Amateurs\" } }) { - name - bands(first: 100, orderBy: id) { + reviews(first: 100, orderBy: id, where: { author_: { name_starts_with: \"Good\" } }) { + body + author { name } } }"; run_query(QUERY, |result, _| { - let the_musicians = object! { - name: "The Musicians", + let exp = object! { + reviews: vec![ + object! { body: "Good amateurs", author: object! { name: "Goodwill" } }, + object! 
{ body: "Good", author: object! { name: "Goodwill" } }, + ] }; - let the_amateurs = object! { - name: "The Amateurs", - }; + let data = extract_data!(result).unwrap(); + assert_eq!(data, exp); + }) +} + +#[test] +fn can_query_entity_by_child_interface_derived_field() { + const QUERY: &str = " + query { + songs(first: 100, orderBy: id, where: { release_: { title_starts_with: \"Pop\" } }) { + title + release { + title + } + } + }"; + run_query(QUERY, |result, _| { let exp = object! { - musicians: vec![ - object! { name: "John", bands: vec![ the_musicians.clone(), the_amateurs.clone() ]}, - object! { name: "Tom", bands: vec![ the_musicians.clone(), the_amateurs.clone() ] }, + songs: vec![ + object! { title: "Pop Tune", release: object! { title: "Pop and Folk" } }, + object! { title: "Folk Tune", release: object! { title: "Pop and Folk" } }, ] }; @@ -595,18 +1208,21 @@ fn can_query_with_child_filter_on_list_type_field() { } #[test] -fn can_query_with_child_filter_on_derived_list_type_field() { +fn can_query_interface_by_child_entity_derived_field() { const QUERY: &str = " query { - musicians(first: 100, orderBy: id, where: { writtenSongs_: { title_contains: \"Rock\" } }) { - name + medias(first: 100, orderBy: id, where: { song_: { title_starts_with: \"Folk\" } }) { + title + song { + title + } } }"; run_query(QUERY, |result, _| { let exp = object! { - musicians: vec![ - object! { name: "Lisa" }, + medias: vec![ + object! { title: "Folk Tune Music Video", song: object! { title: "Folk Tune" } }, ] }; @@ -616,21 +1232,24 @@ fn can_query_with_child_filter_on_derived_list_type_field() { } #[test] -fn can_query_with_child_filter_on_named_type_field() { +fn can_query_entity_by_child_interface_list_field() { const QUERY: &str = " query { - musicians(first: 100, orderBy: id, where: { mainBand_: { name_contains: \"The Amateurs\" } }) { - name - mainBand { - id + songs(first: 100, orderBy: id, where: { media_: { title_starts_with: \"Cheesy Tune\" } }) { + title + media { + title } } }"; run_query(QUERY, |result, _| { let exp = object! { - musicians: vec![ - object! { name: "Tom", mainBand: object! { id: "b2"} } + songs: vec![ + object! { title: "Cheesy Tune", media: vec![ + object! { title: "Cheesy Tune Single Cover" }, + object! { title: "Cheesy Tune Music Video" } + ] }, ] }; @@ -640,19 +1259,23 @@ fn can_query_with_child_filter_on_named_type_field() { } #[test] -fn can_query_with_child_filter_on_derived_named_type_field() { +fn can_query_entity_by_child_interface_list_derived_field() { const QUERY: &str = " query { - songs(first: 100, orderBy: id, where: { band_: { name_contains: \"The Musicians\" } }) { + songs(first: 100, orderBy: id, where: { reviews_: { body_starts_with: \"Good\" } }) { title + reviews { + body + } } }"; run_query(QUERY, |result, _| { let exp = object! { songs: vec![ - object! { title: "Cheesy Tune" }, - object! { title: "Rock Tune" }, + object! { title: "Pop Tune", reviews: vec![ + object! { body: "Good" }, + ] }, ] }; @@ -932,6 +1555,7 @@ fn instant_timeout() { .unwrap() .into_static(), None, + false, ); match first_result( @@ -1801,3 +2425,194 @@ fn deterministic_error() { assert_eq!(expected, serde_json::to_value(&result).unwrap()); }) } + +#[test] +fn can_query_with_or_filter() { + const QUERY: &str = " + query { + musicians(where: { or: [{ name: \"John\" }, { id: \"m2\" }] }) { + name + id + } + } + "; + + run_query(QUERY, |result, _| { + let exp = object! { + musicians: vec![ + object! { name: "John", id: "m1" }, + object! 
{ name: "Lisa", id: "m2" }, + ], + }; + let data = extract_data!(result).unwrap(); + assert_eq!(data, exp); + }) +} + +#[test] +fn can_query_with_or_filter_fields_always_and() { + const QUERY: &str = " + query { + musicians(where: { or: [{ name: \"John\", id: \"m2\" }] }) { + name + id + } + } + "; + + run_query(QUERY, |result, _| { + let exp = object! { + musicians: r::Value::List(vec![]), + }; + let data = extract_data!(result).unwrap(); + assert_eq!(data, exp); + }) +} + +#[test] +fn can_query_with_and_filter() { + const QUERY: &str = " + query { + musicians(where: { and: [{ name: \"John\", id: \"m2\" }] }) { + name + id + } + } + "; + + run_query(QUERY, |result, _| { + let exp = object! { + musicians: r::Value::List(vec![]), + }; + let data = extract_data!(result).unwrap(); + assert_eq!(data, exp); + }) +} + +#[test] +fn can_query_with_or_and_filter() { + const QUERY: &str = " + query { + musicians( + where: { or: [{ name: \"John\", id: \"m1\" }, { mainBand: \"b2\" }] } + ) { + name + id + } + } + "; + + run_query(QUERY, |result, _| { + let exp = object! { + musicians: vec![ + object! { name: "John", id: "m1" }, + object! { name: "Tom", id: "m3" }, + ], + }; + let data = extract_data!(result).unwrap(); + assert_eq!(data, exp); + }) +} + +#[test] +fn can_query_with_or_explicit_and_filter() { + const QUERY: &str = " + query { + musicians( + where: { or: [{ and: [{ name: \"John\", id: \"m1\" }] }, { mainBand: \"b2\" }] } + ) { + name + id + } + } + "; + + run_query(QUERY, |result, _| { + let exp = object! { + musicians: vec![ + object! { name: "John", id: "m1" }, + object! { name: "Tom", id: "m3" }, + ], + }; + let data = extract_data!(result).unwrap(); + assert_eq!(data, exp); + }) +} + +#[test] +fn can_query_with_or_implicit_and_filter() { + const QUERY: &str = " + query { + musicians( + where: { or: [{ name: \"John\", id: \"m1\" }, { name: \"Lisa\", id: \"m2\" }] } + ) { + name + id + } + } + "; + + run_query(QUERY, |result, _| { + let exp = object! { + musicians: vec![ + object! { name: "John", id: "m1" }, + object! { name: "Lisa", id: "m2" }, + ], + }; + let data = extract_data!(result).unwrap(); + assert_eq!(data, exp); + }) +} + +#[test] +fn trace_works() { + run_test_sequentially(|store| async move { + let deployment = setup_readonly(store.as_ref()).await; + let query = Query::new( + graphql_parser::parse_query("query { musicians(first: 100) { name } }") + .unwrap() + .into_static(), + None, + true, + ); + + let result = execute_subgraph_query( + query, + QueryTarget::Deployment(deployment.hash.into(), Default::default()), + ) + .await; + + let trace = &result.first().unwrap().trace; + assert!(!trace.is_none(), "result has a trace"); + }) +} + +/// Check that various comparisons against `id` work as expected. This also +/// serves as a test that they work for `String` as well as `Bytes` fields +/// in general +#[test] +fn can_compare_id() { + // For each entry `(cond, sids)` in this array, check that a query with + // a where clause `cond` returns a list of songs whose `sid` are the + // ones listed in `sids` + let checks = [ + ("id_gt: @S2@", vec!["s3", "s4"]), + ("id_gte: @S2@", vec!["s2", "s3", "s4"]), + ("id_lt: @S2@", vec!["s1"]), + ("id_lte: @S2@", vec!["s1", "s2"]), + ("id_not: @S2@", vec!["s1", "s3", "s4"]), + ]; + + for (cond, sids) in checks { + let query = format!("query {{ songs(where: {{ {cond} }}) {{ sid }} }}"); + let sids: Vec<_> = sids + .iter() + .map(|sid| object! { sid: sid.to_string() }) + .collect(); + let exp = object! 
{ songs: sids }; + run_query(query, move |result, id_type| { + let data = extract_data!(result).unwrap(); + assert_eq!(data, exp, "check {} for {:?} ids", cond, id_type); + }) + } +} diff --git a/mock/Cargo.toml b/mock/Cargo.toml index 7a3a79aadc6..02a1a333d74 100644 --- a/mock/Cargo.toml +++ b/mock/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "graph-mock" -version = "0.27.0" -edition = "2021" +version.workspace = true +edition.workspace = true [dependencies] graph = { path = "../graph" } diff --git a/mock/src/metrics_registry.rs b/mock/src/metrics_registry.rs index e7e238d061a..0b450523b64 100644 --- a/mock/src/metrics_registry.rs +++ b/mock/src/metrics_registry.rs @@ -4,6 +4,7 @@ use graph::prometheus::{CounterVec, GaugeVec, HistogramOpts, HistogramVec}; use std::collections::HashMap; +#[derive(Clone)] pub struct MockMetricsRegistry {} impl MockMetricsRegistry { @@ -12,12 +13,6 @@ impl MockMetricsRegistry { } } -impl Clone for MockMetricsRegistry { - fn clone(&self) -> Self { - Self {} - } -} - impl MetricsRegistryTrait for MockMetricsRegistry { fn register(&self, _name: &str, _c: Box) { // Ignore, we do not register metrics diff --git a/node/Cargo.toml b/node/Cargo.toml index b6648c9a7f1..8ff16764655 100644 --- a/node/Cargo.toml +++ b/node/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "graph-node" -version = "0.27.0" -edition = "2021" +version.workspace = true +edition.workspace = true default-run = "graph-node" [[bin]] @@ -13,13 +13,13 @@ name = "graphman" path = "src/bin/manager.rs" [dependencies] -clap = "3.2.21" -env_logger = "0.9.0" +clap = { version = "3.2.23", features = ["derive", "env"] } +env_logger = "0.9.3" git-testament = "0.2" graphql-parser = "0.4.0" futures = { version = "0.3.1", features = ["compat"] } lazy_static = "1.2.0" -url = "2.2.1" +url = "2.3.1" crossbeam-channel = "0.5.5" graph = { path = "../graph" } graph-core = { path = "../core" } @@ -27,22 +27,21 @@ graph-chain-arweave = { path = "../chain/arweave" } graph-chain-ethereum = { path = "../chain/ethereum" } graph-chain-near = { path = "../chain/near" } graph-chain-cosmos = { path = "../chain/cosmos" } -graph-chain-substreams= { path = "../chain/substreams" } +graph-chain-substreams = { path = "../chain/substreams" } graph-graphql = { path = "../graphql" } graph-runtime-wasm = { path = "../runtime/wasm" } graph-server-http = { path = "../server/http" } graph-server-index-node = { path = "../server/index-node" } -graph-server-json-rpc = { path = "../server/json-rpc"} +graph-server-json-rpc = { path = "../server/json-rpc" } graph-server-websocket = { path = "../server/websocket" } graph-server-metrics = { path = "../server/metrics" } graph-store-postgres = { path = "../store/postgres" } -regex = "1.5.4" serde = { version = "1.0.126", features = ["derive", "rc"] } serde_regex = "1.1.0" -structopt = { version = "0.3.26", features = ["wrap_help"] } -toml = "0.5.7" +toml = "0.7.1" shellexpand = "2.1.0" +termcolor = "1.2.0" diesel = "1.4.8" http = "0.2.5" # must be compatible with the version rust-web3 uses -prometheus = { version ="0.13.1", features = ["push"] } -json-structural-diff = {version = "0.1", features = ["colorize"] } +prometheus = { version = "0.13.3", features = ["push"] } +json-structural-diff = { version = "0.1", features = ["colorize"] } diff --git a/node/resources/tests/full_config.toml b/node/resources/tests/full_config.toml index 97d3be67856..1f907539194 100644 --- a/node/resources/tests/full_config.toml +++ b/node/resources/tests/full_config.toml @@ -47,6 +47,7 @@ ingestor = "index_0" shard = 
"primary" provider = [ { label = "mainnet-0", url = "http://rpc.mainnet.io", features = ["archive", "traces"] }, + { label = "mainnet-1", details = { type = "web3call", url = "http://rpc.mainnet.io", features = ["archive", "traces"] }}, { label = "firehose", details = { type = "firehose", url = "http://localhost:9000", features = [] }}, { label = "substreams", details = { type = "substreams", url = "http://localhost:9000", features = [] }}, ] diff --git a/node/src/bin/manager.rs b/node/src/bin/manager.rs index d6a776d6814..4a70c01647a 100644 --- a/node/src/bin/manager.rs +++ b/node/src/bin/manager.rs @@ -1,3 +1,4 @@ +use clap::{Parser, Subcommand}; use config::PoolSize; use git_testament::{git_testament, render_testament}; use graph::{data::graphql::effort::LoadManager, prelude::chrono, prometheus::Registry}; @@ -13,13 +14,15 @@ use graph_chain_ethereum::{EthereumAdapter, EthereumNetworks}; use graph_core::MetricsRegistry; use graph_graphql::prelude::GraphQlRunner; use graph_node::config::{self, Config as Cfg}; +use graph_node::manager::color::Terminal; use graph_node::manager::commands; use graph_node::{ - chain::create_ethereum_networks, + chain::create_all_ethereum_networks, manager::{deployment::DeploymentSearch, PanicSubscriptionManager}, store_builder::StoreBuilder, MetricsContext, }; +use graph_store_postgres::connection_pool::PoolCoordinator; use graph_store_postgres::ChainStore; use graph_store_postgres::{ connection_pool::ConnectionPool, BlockStore, NotificationSender, Shard, Store, SubgraphStore, @@ -27,8 +30,6 @@ use graph_store_postgres::{ }; use lazy_static::lazy_static; use std::{collections::HashMap, env, num::ParseIntError, sync::Arc, time::Duration}; -use structopt::StructOpt; - const VERSION_LABEL_KEY: &str = "version"; git_testament!(TESTAMENT); @@ -37,56 +38,62 @@ lazy_static! { static ref RENDERED_TESTAMENT: String = render_testament!(TESTAMENT); } -#[derive(Clone, Debug, StructOpt)] -#[structopt( +#[derive(Parser, Clone, Debug)] +#[clap( name = "graphman", about = "Management tool for a graph-node infrastructure", author = "Graph Protocol, Inc.", version = RENDERED_TESTAMENT.as_str() )] pub struct Opt { - #[structopt( + #[clap( + long, + default_value = "auto", + help = "whether to colorize the output. Set to 'auto' to colorize only on\nterminals (the default), 'always' to always colorize, or 'never'\nto not colorize at all" + )] + pub color: String, + #[clap( long, short, env = "GRAPH_NODE_CONFIG", help = "the name of the configuration file\n" )] pub config: String, - #[structopt( + #[clap( long, default_value = "default", value_name = "NODE_ID", env = "GRAPH_NODE_ID", - help = "a unique identifier for this node.\nShould have the same value between consecutive node restarts\n" + help = "a unique identifier for this node. Should have the same value\nbetween consecutive node restarts\n" )] pub node_id: String, - #[structopt( + #[clap( long, value_name = "{HOST:PORT|URL}", default_value = "https://api.thegraph.com/ipfs/", env = "IPFS", - help = "HTTP addresses of IPFS nodes" + help = "HTTP addresses of IPFS nodes\n" )] pub ipfs: Vec, - #[structopt( + #[clap( long, default_value = "3", - help = "the size for connection pools. Set to 0\n to use pool size from configuration file\n corresponding to NODE_ID" + help = "the size for connection pools. 
Set to 0 to use pool size from\nconfiguration file corresponding to NODE_ID\n" )] pub pool_size: u32, - #[structopt(long, value_name = "URL", help = "Base URL for forking subgraphs")] + #[clap(long, value_name = "URL", help = "Base URL for forking subgraphs")] pub fork_base: Option, - #[structopt(long, help = "version label, used for prometheus metrics")] + #[clap(long, help = "version label, used for prometheus metrics")] pub version_label: Option, - #[structopt(subcommand)] + #[clap(subcommand)] pub cmd: Command, } -#[derive(Clone, Debug, StructOpt)] +#[derive(Clone, Debug, Subcommand)] pub enum Command { /// Calculate the transaction speed TxnSpeed { - #[structopt(long, short, default_value = "60")] + #[clap(long, short, default_value = "60")] delay: u64, }, /// Print details about a deployment @@ -99,22 +106,23 @@ pub enum Command { /// The deployment (see above) deployment: DeploymentSearch, /// List only current version - #[structopt(long, short)] + #[clap(long, short)] current: bool, /// List only pending versions - #[structopt(long, short)] + #[clap(long, short)] pending: bool, /// Include status information - #[structopt(long, short)] + #[clap(long, short)] status: bool, /// List only used (current and pending) versions - #[structopt(long, short)] + #[clap(long, short)] used: bool, }, /// Manage unused deployments /// /// Record which deployments are unused with `record`, then remove them /// with `remove` + #[clap(subcommand)] Unused(UnusedCommand), /// Remove a named subgraph Remove { @@ -142,10 +150,10 @@ pub enum Command { Rewind { /// Force rewinding even if the block hash is not found in the local /// database - #[structopt(long, short)] + #[clap(long, short)] force: bool, /// Sleep for this many seconds after pausing subgraphs - #[structopt( + #[clap( long, short, default_value = "10", @@ -159,9 +167,14 @@ pub enum Command { /// The deployments to rewind (see `help info`) deployments: Vec, }, - /// Deploy and run an arbitrary subgraph up to a certain block, although it can surpass it by a few blocks, it's not exact (use for dev and testing purposes) -- WARNING: WILL RUN MIGRATIONS ON THE DB, DO NOT USE IN PRODUCTION + /// Deploy and run an arbitrary subgraph up to a certain block + /// + /// The run can surpass it by a few blocks, it's not exact (use for dev + /// and testing purposes) -- WARNING: WILL RUN MIGRATIONS ON THE DB, DO + /// NOT USE IN PRODUCTION /// - /// Also worth noting that the deployed subgraph will be removed at the end. + /// Also worth noting that the deployed subgraph will be removed at the + /// end. 
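Since this file is the largest instance of the structopt-to-clap migration in the diff, a condensed sketch of the pattern may help: `StructOpt` becomes `Parser` on the top-level options struct and `Subcommand` on command enums, and `#[structopt(...)]` attributes become `#[clap(...)]`. The names below are invented for illustration; only the shape matches the real change (clap 3.x with the `derive` feature, as enabled in `node/Cargo.toml` above):

```rust
use clap::{Parser, Subcommand};

#[derive(Parser, Debug)]
#[clap(name = "example", version = "0.1")]
struct Opt {
    /// Whether to colorize output: auto, always, or never
    #[clap(long, default_value = "auto")]
    color: String,
    #[clap(subcommand)]
    cmd: Command,
}

#[derive(Subcommand, Debug)]
enum Command {
    /// Print a greeting
    Hello {
        #[clap(long, short, default_value = "world")]
        name: String,
    },
}

fn main() {
    let opt = Opt::parse();
    match opt.cmd {
        Command::Hello { name } => println!("hello, {name}"),
    }
}
```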
Run { /// Network name (must fit one of the chain) network_name: String, @@ -179,18 +192,21 @@ pub enum Command { /// /// Print information about a configuration file without /// actually connecting to databases or network clients + #[clap(subcommand)] Config(ConfigCommand), /// Listen for store events and print them + #[clap(subcommand)] Listen(ListenCommand), /// Manage deployment copies and grafts + #[clap(subcommand)] Copy(CopyCommand), /// Run a GraphQL query Query { /// Save the JSON query result in this file - #[structopt(long, short)] + #[clap(long, short)] output: Option, /// Save the query trace in this file - #[structopt(long, short)] + #[clap(long, short)] trace: Option, /// The subgraph to query @@ -203,12 +219,54 @@ pub enum Command { vars: Vec, }, /// Get information about chains and manipulate them + #[clap(subcommand)] Chain(ChainCommand), /// Manipulate internal subgraph statistics + #[clap(subcommand)] Stats(StatsCommand), /// Manage database indexes + #[clap(subcommand)] Index(IndexCommand), + + /// Prune deployments + Prune { + /// The deployment to prune (see `help info`) + deployment: DeploymentSearch, + /// Prune tables with a ratio of entities to entity versions lower than this + #[clap(long, short, default_value = "0.20")] + prune_ratio: f64, + /// How much history to keep in blocks + #[clap(long, short = 'y', default_value = "10000")] + history: usize, + }, + + /// General database management + #[clap(subcommand)] + Database(DatabaseCommand), + + /// Delete a deployment and all it's indexed data + /// + /// The deployment can be specified as either a subgraph name, an IPFS + /// hash `Qm..`, or the database namespace `sgdNNN`. Since the same IPFS + /// hash can be deployed in multiple shards, it is possible to specify + /// the shard by adding `:shard` to the IPFS hash. 
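For the new `Prune` command above, `--history` is measured in blocks ("How much history to keep in blocks"). A toy illustration of that retention arithmetic, under the assumption that pruning keeps only the most recent `history` blocks behind the head; this is a sketch of the CLI semantics, not graph-node's actual pruning implementation:

```rust
// Earliest block whose history is still retained after pruning, assuming
// `history` blocks are kept behind the chain head. Hypothetical helper.
fn earliest_retained_block(head: u64, history: u64) -> u64 {
    head.saturating_sub(history)
}

fn main() {
    // With the default --history 10000 and a head at block 16_000_000,
    // data visible only before block 15_990_000 would become prunable.
    assert_eq!(earliest_retained_block(16_000_000, 10_000), 15_990_000);
}
```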
+ Drop { + /// The deployment identifier + deployment: DeploymentSearch, + /// Search only for current version + #[clap(long, short)] + current: bool, + /// Search only for pending versions + #[clap(long, short)] + pending: bool, + /// Search only for used (current and pending) versions + #[clap(long, short)] + used: bool, + /// Skip confirmation prompt + #[clap(long, short)] + force: bool, + }, } impl Command { @@ -220,12 +278,12 @@ impl Command { } } -#[derive(Clone, Debug, StructOpt)] +#[derive(Clone, Debug, Subcommand)] pub enum UnusedCommand { /// List unused deployments List { /// Only list unused deployments that still exist - #[structopt(short, long)] + #[clap(short, long)] existing: bool, }, /// Update and record currently unused deployments @@ -236,23 +294,23 @@ pub enum UnusedCommand { /// i.e., smaller deployments are removed before larger ones Remove { /// How many unused deployments to remove (default: all) - #[structopt(short, long)] + #[clap(short, long)] count: Option, /// Remove a specific deployment - #[structopt(short, long, conflicts_with = "count")] + #[clap(short, long, conflicts_with = "count")] deployment: Option, /// Remove unused deployments that were recorded at least this many minutes ago - #[structopt(short, long)] + #[clap(short, long)] older: Option, }, } -#[derive(Clone, Debug, StructOpt)] +#[derive(Clone, Debug, Subcommand)] pub enum ConfigCommand { /// Check and validate the configuration file Check { /// Print the configuration as JSON - #[structopt(long)] + #[clap(long)] print: bool, }, /// Print how a specific subgraph would be placed @@ -267,7 +325,7 @@ pub enum ConfigCommand { /// The names of the nodes that are going to run nodes: Vec, /// Print connections by shard rather than by node - #[structopt(short, long)] + #[clap(short, long)] shard: bool, }, /// Show eligible providers @@ -276,13 +334,13 @@ pub enum ConfigCommand { /// network with the given features. Set the name of the node for which /// to simulate placement with the toplevel `--node-id` option Provider { - #[structopt(short, long, default_value = "")] + #[clap(short, long, default_value = "")] features: String, network: String, }, } -#[derive(Clone, Debug, StructOpt)] +#[derive(Clone, Debug, Subcommand)] pub enum ListenCommand { /// Listen only to assignment events Assignments, @@ -294,7 +352,7 @@ pub enum ListenCommand { entity_types: Vec, }, } -#[derive(Clone, Debug, StructOpt)] +#[derive(Clone, Debug, Subcommand)] pub enum CopyCommand { /// Create a copy of an existing subgraph /// @@ -306,7 +364,7 @@ pub enum CopyCommand { /// should be chosen such that only final blocks are copied Create { /// How far behind `src` subgraph head to copy - #[structopt(long, short, default_value = "200")] + #[clap(long, short, default_value = "200")] offset: u32, /// The source deployment (see `help info`) src: DeploymentSearch, @@ -335,13 +393,13 @@ pub enum CopyCommand { }, } -#[derive(Clone, Debug, StructOpt)] +#[derive(Clone, Debug, Subcommand)] pub enum ChainCommand { /// List all chains that are in the database List, /// Show information about a chain Info { - #[structopt( + #[clap( long, short, default_value = "50", @@ -349,7 +407,7 @@ pub enum ChainCommand { help = "the reorg threshold to check\n" )] reorg_threshold: i32, - #[structopt(long, help = "display block hashes\n")] + #[clap(long, help = "display block hashes\n")] hashes: bool, name: String, }, @@ -361,25 +419,49 @@ pub enum ChainCommand { /// Compares cached blocks with fresh ones and clears the block cache when they differ. 
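One detail worth calling out from `copy create` above: the `--offset` default of 200 exists so that copying starts far enough behind the source deployment's head that only blocks considered final are copied. A toy illustration of that arithmetic (not the actual implementation):

```rust
// Block at which a copy would start, given the source head and the
// `--offset` value. Hypothetical helper for illustration only.
fn copy_base_block(src_head: u64, offset: u64) -> u64 {
    src_head.saturating_sub(offset)
}

fn main() {
    // Default offset is 200 blocks behind the source head.
    assert_eq!(copy_base_block(1_000_000, 200), 999_800);
}
```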
CheckBlocks { - #[structopt(subcommand)] // Note that we mark a field as a subcommand + #[clap(subcommand)] // Note that we mark a field as a subcommand method: CheckBlockMethod, - /// Chain name (must be an existing chain, see 'chain list') - #[structopt(empty_values = false)] + #[clap(empty_values = false)] chain_name: String, }, /// Truncates the whole block cache for the given chain. Truncate { /// Chain name (must be an existing chain, see 'chain list') - #[structopt(empty_values = false)] + #[clap(empty_values = false)] chain_name: String, /// Skips confirmation prompt - #[structopt(long, short)] + #[clap(long, short)] force: bool, }, + + /// Execute operations on the call cache. + CallCache { + #[clap(subcommand)] + method: CallCacheCommand, + /// Chain name (must be an existing chain, see 'chain list') + #[clap(empty_values = false)] + chain_name: String, + }, +} + +#[derive(Clone, Debug, Subcommand)] +pub enum CallCacheCommand { + /// Remove the call cache of the specified chain. + /// + /// If no block numbers are given via `--from` and `--to`, the entire call cache + /// will be removed. + Remove { + /// Starting block number + #[clap(long, short)] + from: Option, + /// Ending block number + #[clap(long, short)] + to: Option, + }, } -#[derive(Clone, Debug, StructOpt)] +#[derive(Clone, Debug, Subcommand)] pub enum StatsCommand { /// Toggle whether a table is account-like /// @@ -388,7 +470,7 @@ /// to distinct entities. It can take up to 5 minutes for this to take /// effect. AccountLike { - #[structopt(long, short, help = "do not set but clear the account-like flag\n")] + #[clap(long, short, help = "do not set but clear the account-like flag\n")] clear: bool, /// The deployment (see `help info`). deployment: DeploymentSearch, @@ -399,27 +481,60 @@ /// /// Show how many distinct entities and how many versions the tables of /// each subgraph have. The data is based on the statistics that - /// Postgres keeps, and only refreshed when a table is analyzed. If a - /// table name is passed, perform a full count of entities and versions - /// in that table, which can be very slow, but is needed since the - /// statistics based data can be off by an order of magnitude. + /// Postgres keeps, and only refreshed when a table is analyzed. Show { /// The deployment (see `help info`). deployment: DeploymentSearch, - /// The name of a table to fully count which can be very slow - #[structopt(long, short)] - table: Option, }, /// Perform a SQL ANALYZE on an Entity table Analyze { /// The deployment (see `help info`). deployment: DeploymentSearch, - /// The name of the Entity to ANALYZE, in camel case - entity: String, + /// The name of the Entity to ANALYZE, in camel case. Analyze all + /// tables if omitted + entity: Option, + }, + /// Show statistics targets for the statistics collector + /// + /// For all tables in the given deployment, show the target for each + /// column. A value of `-1` means that the global default is used + Target { + /// The deployment (see `help info`). + deployment: DeploymentSearch, + }, + /// Set the statistics targets for the statistics collector + /// + /// Set (or reset) the target for a deployment. The statistics target + /// determines how much of a table Postgres will sample when it analyzes + /// a table. This can be particularly beneficial when Postgres chooses + /// suboptimal query plans for some queries.
Increasing the target will + /// make analyzing tables take longer and will require more space in + /// Postgres' internal statistics storage. + /// + /// If no `columns` are provided, change the statistics target for the + /// `id` and `block_range` columns which will usually be enough to + /// improve query performance, but it might be necessary to increase the + /// target for other columns, too. + SetTarget { + /// The value of the statistics target + #[clap(short, long, default_value = "200", conflicts_with = "reset")] + target: u32, + /// Reset the target so the default is used + #[clap(long, conflicts_with = "target")] + reset: bool, + /// Do not analyze changed tables + #[clap(long)] + no_analyze: bool, + /// The deployment (see `help info`). + deployment: DeploymentSearch, + /// The table for which to set the target, all if omitted + entity: Option, + /// The columns to which to apply the target. Defaults to `id, block_range` + columns: Vec, }, } -#[derive(Clone, Debug, StructOpt)] +#[derive(Clone, Debug, Subcommand)] pub enum IndexCommand { /// Creates a new database index. /// @@ -432,22 +547,22 @@ /// This command may be time-consuming. Create { /// The deployment (see `help info`). - #[structopt(empty_values = false)] + #[clap(empty_values = false)] deployment: DeploymentSearch, /// The Entity name. /// /// Can be expressed either in upper camel case (as its GraphQL definition) or in snake case /// (as its SQL table name). - #[structopt(empty_values = false)] + #[clap(empty_values = false)] entity: String, /// The Field names. /// /// Each field can be expressed either in camel case (as its GraphQL definition) or in snake /// case (as its SQL column name). - #[structopt(min_values = 1, required = true)] + #[clap(min_values = 1, required = true)] fields: Vec, /// The index method. Defaults to `btree`. - #[structopt( + #[clap( short, long, default_value = "btree", possible_values = &["btree", "hash", "gist", "spgist", "gin", "brin"] )] @@ -455,42 +570,101 @@ }, /// Lists existing indexes for a given Entity List { - /// The deployment (see `help info`). - #[structopt(empty_values = false)] + /// Do not list attribute indexes + #[clap(short = 'A', long)] + no_attribute_indexes: bool, + /// Do not list any of the indexes that are generated by default, + /// including attribute indexes + #[clap(short = 'D', long)] + no_default_indexes: bool, + /// Print SQL statements instead of a more human-readable overview + #[clap(long)] + sql: bool, + /// When `--sql` is used, make statements run concurrently + #[clap(long, requires = "sql")] + concurrent: bool, + /// When `--sql` is used, add `if not exists` clause + #[clap(long, requires = "sql")] + if_not_exists: bool, + /// The deployment (see `help info`). + #[clap(empty_values = false)] + deployment: DeploymentSearch, /// The Entity name. /// /// Can be expressed either in upper camel case (as its GraphQL definition) or in snake case /// (as its SQL table name). - #[structopt(empty_values = false)] + #[clap(empty_values = false)] entity: String, }, /// Drops an index for a given deployment, concurrently Drop { /// The deployment (see `help info`).
- #[structopt(empty_values = false)] + #[clap(empty_values = false)] deployment: DeploymentSearch, /// The name of the index to be dropped - #[structopt(empty_values = false)] + #[clap(empty_values = false)] index_name: String, }, } -#[derive(Clone, Debug, StructOpt)] +#[derive(Clone, Debug, Subcommand)] +pub enum DatabaseCommand { + /// Apply any pending migrations to the database schema in all shards + Migrate, + /// Refresh the mapping of tables into different shards + /// + /// This command rebuilds the mappings of tables from one shard into all + /// other shards. It makes it possible to fix these mappings when a + /// database migration was interrupted before it could rebuild the + /// mappings + /// + /// Each shard imports certain tables from all other shards. To recreate + /// the mappings in a given shard, use `--dest SHARD`, to recreate the + /// mappings in other shards that depend on a shard, use `--source + /// SHARD`. Without `--dest` and `--source` options, recreate all + /// possible mappings. Recreating mappings needlessly is harmless, but + /// might take quite a bit of time with a lot of shards. + Remap { + /// Only refresh mappings from SOURCE + #[clap(long, short)] + source: Option, + /// Only refresh mappings inside DEST + #[clap(long, short)] + dest: Option, + /// Continue remapping even when one operation fails + #[clap(long, short)] + force: bool, + }, +} +#[derive(Clone, Debug, Subcommand)] pub enum CheckBlockMethod { - /// The number of the target block - ByHash { hash: String }, - /// The hash of the target block - ByNumber { number: i32 }, + ByHash { + /// The block hash to verify + hash: String, + }, + + /// The number of the target block + ByNumber { + /// The block number to verify + number: i32, + /// Delete duplicated blocks (by number) if found + #[clap(long, short, action)] + delete_duplicates: bool, + }, /// A block number range, inclusive on both ends. 
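Both bounds of the range being optional means the range may be open on either end. A hedged sketch of the validation this implies — the PR's actual `ranges::Range` type (see the "Custom range type" module further down this diff) may differ, and the default lower bound of 0 is an assumption:

```rust
use anyhow::{bail, Result};

/// Illustrative only: an inclusive block range with optional bounds.
struct Range {
    lower_bound: i32,         // assumed to default to 0 when `--from` is omitted
    upper_bound: Option<i32>, // `None` is interpreted as "up to the chain head"
}

impl Range {
    fn new(from: Option<i32>, to: Option<i32>) -> Result<Self> {
        let lower_bound = from.unwrap_or(0);
        if let Some(to) = to {
            if to < lower_bound {
                bail!("`--to` ({to}) must not be smaller than `--from` ({lower_bound})");
            }
        }
        Ok(Range { lower_bound, upper_bound: to })
    }
}

fn main() -> Result<()> {
    // Check everything from block 100 up to the chain head.
    let range = Range::new(Some(100), None)?;
    assert_eq!(range.lower_bound, 100);
    assert!(range.upper_bound.is_none());
    Ok(())
}
```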
ByRange { - #[structopt(long, short)] + /// The first block number to verify + #[clap(long, short)] from: Option, - #[structopt(long, short)] + /// The last block number to verify + #[clap(long, short)] to: Option, + /// Delete duplicated blocks (by number) if found + #[clap(long, short, action)] + delete_duplicates: bool, }, } @@ -570,13 +744,14 @@ impl Context { fn primary_pool(self) -> ConnectionPool { let primary = self.config.primary_store(); + let coord = Arc::new(PoolCoordinator::new(Arc::new(vec![]))); let pool = StoreBuilder::main_pool( &self.logger, &self.node_id, PRIMARY_SHARD.as_str(), primary, self.metrics_registry(), - Arc::new(vec![]), + coord, ); pool.skip_setup(); pool @@ -625,7 +800,7 @@ impl Context { } fn store_and_pools(self) -> (Arc, HashMap) { - let (subgraph_store, pools) = StoreBuilder::make_subgraph_store_and_pools( + let (subgraph_store, pools, _) = StoreBuilder::make_subgraph_store_and_pools( &self.logger, &self.node_id, &self.config, @@ -682,14 +857,14 @@ impl Context { async fn ethereum_networks(&self) -> anyhow::Result { let logger = self.logger.clone(); let registry = self.metrics_registry(); - create_ethereum_networks(logger, registry, &self.config).await + create_all_ethereum_networks(logger, registry, &self.config).await } fn chain_store(self, chain_name: &str) -> anyhow::Result> { use graph::components::store::BlockStore; self.store() .block_store() - .chain_store(&chain_name) + .chain_store(chain_name) .ok_or_else(|| anyhow::anyhow!("Could not find a network named '{}'", chain_name)) } @@ -702,8 +877,7 @@ impl Context { let ethereum_adapter = ethereum_networks .networks .get(chain_name) - .map(|adapters| adapters.cheapest()) - .flatten() + .and_then(|adapters| adapters.cheapest()) .ok_or(anyhow::anyhow!( "Failed to obtain an Ethereum adapter for chain '{}'", chain_name @@ -714,7 +888,9 @@ impl Context { #[tokio::main] async fn main() -> anyhow::Result<()> { - let opt = Opt::from_args(); + let opt = Opt::parse(); + + Terminal::set_color_preference(&opt.color); let version_label = opt.version_label.clone(); // Set up logger @@ -753,7 +929,7 @@ async fn main() -> anyhow::Result<()> { let fork_base = match &opt.fork_base { Some(url) => { // Make sure the endpoint ends with a terminating slash. 
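For context on the comment above: with the `url` crate (re-exported as `graph::url`), `Url::join` drops the last path segment of a base URL that lacks a trailing slash, which is why the fork base must be normalized before relative paths are joined onto it. A small standalone demonstration:

```rust
use url::Url;

fn main() {
    let with_slash = Url::parse("http://example.com/api/").unwrap();
    let without_slash = Url::parse("http://example.com/api").unwrap();

    // With the trailing slash, joined paths extend the base...
    assert_eq!(
        with_slash.join("subgraphs/id/foo").unwrap().as_str(),
        "http://example.com/api/subgraphs/id/foo"
    );
    // ...without it, the final segment (`api`) is treated as a file
    // name and replaced.
    assert_eq!(
        without_slash.join("subgraphs/id/foo").unwrap().as_str(),
        "http://example.com/subgraphs/id/foo"
    );
}
```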
- let url = if !url.ends_with("/") { + let url = if !url.ends_with('/') { let mut url = url.clone(); url.push('/'); Url::parse(&url) @@ -793,7 +969,7 @@ async fn main() -> anyhow::Result<()> { } => { let (primary, store) = if status { let (store, primary) = ctx.store_and_primary(); - (primary.clone(), Some(store)) + (primary, Some(store)) } else { (ctx.primary_pool(), None) }; @@ -813,7 +989,7 @@ async fn main() -> anyhow::Result<()> { } => { let count = count.unwrap_or(1_000_000); let older = older.map(|older| chrono::Duration::minutes(older as i64)); - commands::unused_deployments::remove(store, count, deployment, older) + commands::unused_deployments::remove(store, count, deployment.as_deref(), older) } } } @@ -834,7 +1010,7 @@ async fn main() -> anyhow::Result<()> { } } } - Remove { name } => commands::remove::run(ctx.subgraph_store(), name), + Remove { name } => commands::remove::run(ctx.subgraph_store(), &name), Create { name } => commands::create::run(ctx.subgraph_store(), name), Unassign { deployment } => { let sender = ctx.notification_sender(); @@ -965,11 +1141,33 @@ async fn main() -> anyhow::Result<()> { ByHash { hash } => { by_hash(&hash, chain_store, &ethereum_adapter, &logger).await } - ByNumber { number } => { - by_number(number, chain_store, &ethereum_adapter, &logger).await + ByNumber { + number, + delete_duplicates, + } => { + by_number( + number, + chain_store, + &ethereum_adapter, + &logger, + delete_duplicates, + ) + .await } - ByRange { from, to } => { - by_range(chain_store, &ethereum_adapter, from, to, &logger).await + ByRange { + from, + to, + delete_duplicates, + } => { + by_range( + chain_store, + &ethereum_adapter, + from, + to, + &logger, + delete_duplicates, + ) + .await } } } @@ -978,6 +1176,12 @@ async fn main() -> anyhow::Result<()> { let chain_store = ctx.chain_store(&chain_name)?; truncate(chain_store, force) } + CallCache { method, chain_name } => match method { + CallCacheCommand::Remove { from, to } => { + let chain_store = ctx.chain_store(&chain_name)?; + commands::chain::clear_call_cache(chain_store, from, to).await + } + }, } } Stats(cmd) => { @@ -989,8 +1193,9 @@ async fn main() -> anyhow::Result<()> { table, } => { let (store, primary_pool) = ctx.store_and_primary(); + let subgraph_store = store.subgraph_store(); commands::stats::account_like( - store.subgraph_store(), + subgraph_store, primary_pool, clear, &deployment, @@ -998,13 +1203,42 @@ async fn main() -> anyhow::Result<()> { ) .await } - Show { deployment, table } => { - commands::stats::show(ctx.pools(), &deployment, table) - } + Show { deployment } => commands::stats::show(ctx.pools(), &deployment), Analyze { deployment, entity } => { let (store, primary_pool) = ctx.store_and_primary(); let subgraph_store = store.subgraph_store(); - commands::stats::analyze(subgraph_store, primary_pool, deployment, &entity) + commands::stats::analyze( + subgraph_store, + primary_pool, + deployment, + entity.as_deref(), + ) + } + Target { deployment } => { + let (store, primary_pool) = ctx.store_and_primary(); + let subgraph_store = store.subgraph_store(); + commands::stats::target(subgraph_store, primary_pool, &deployment) + } + SetTarget { + target, + reset, + no_analyze, + deployment, + entity, + columns, + } => { + let (store, primary) = ctx.store_and_primary(); + let store = store.subgraph_store(); + let target = if reset { -1 } else { target as i32 }; + commands::stats::set_target( + store, + primary, + &deployment, + entity.as_deref(), + columns, + target, + no_analyze, + ) } } } @@ -1029,8 +1263,27 @@ async fn
main() -> anyhow::Result<()> { ) .await } - List { deployment, entity } => { - commands::index::list(subgraph_store, primary_pool, deployment, &entity).await + List { + deployment, + entity, + no_attribute_indexes, + no_default_indexes, + sql, + concurrent, + if_not_exists, + } => { + commands::index::list( + subgraph_store, + primary_pool, + deployment, + &entity, + no_attribute_indexes, + no_default_indexes, + sql, + concurrent, + if_not_exists, + ) + .await } Drop { deployment, @@ -1041,6 +1294,55 @@ async fn main() -> anyhow::Result<()> { } } } + Database(cmd) => { + match cmd { + DatabaseCommand::Migrate => { + /* creating the store builder runs migrations */ + let _store_builder = ctx.store_builder().await; + println!("All database migrations have been applied"); + Ok(()) + } + DatabaseCommand::Remap { + source, + dest, + force, + } => { + let store_builder = ctx.store_builder().await; + commands::database::remap(&store_builder.coord, source, dest, force).await + } + } + } + Prune { + deployment, + history, + prune_ratio, + } => { + let (store, primary_pool) = ctx.store_and_primary(); + commands::prune::run(store, primary_pool, deployment, history, prune_ratio).await + } + Drop { + deployment, + current, + pending, + used, + force, + } => { + let sender = ctx.notification_sender(); + let (store, primary_pool) = ctx.store_and_primary(); + let subgraph_store = store.subgraph_store(); + + commands::drop::run( + primary_pool, + subgraph_store, + sender, + deployment, + current, + pending, + used, + force, + ) + .await + } } } diff --git a/node/src/chain.rs b/node/src/chain.rs index a7713d25260..e57ba5d626b 100644 --- a/node/src/chain.rs +++ b/node/src/chain.rs @@ -1,11 +1,11 @@ use crate::config::{Config, ProviderDetails}; use ethereum::{EthereumNetworks, ProviderEthRpcMetrics}; -use futures::future::join_all; +use futures::future::{join_all, try_join_all}; use futures::TryFutureExt; -use graph::anyhow::Error; +use graph::anyhow::{bail, Error}; use graph::blockchain::{Block as BlockchainBlock, BlockchainKind, ChainIdentifier}; use graph::cheap_clone::CheapClone; -use graph::firehose::{FirehoseEndpoint, FirehoseNetworks}; +use graph::firehose::{FirehoseEndpoint, FirehoseNetworks, SubgraphLimit}; use graph::ipfs_client::IpfsClient; use graph::prelude::{anyhow, tokio}; use graph::prelude::{prost, MetricsRegistry as MetricsRegistryTrait}; @@ -19,7 +19,7 @@ use std::time::Duration; // The status of a provider that we learned from connecting to it #[derive(PartialEq)] -enum ProviderNetworkStatus { +pub enum ProviderNetworkStatus { Broken { chain_id: String, provider: String, @@ -102,64 +102,6 @@ pub fn create_ipfs_clients(logger: &Logger, ipfs_addresses: &Vec) -> Vec .collect() } -/// Parses an Ethereum connection string and returns the network name and Ethereum adapter. 
-pub async fn create_ethereum_networks( - logger: Logger, - registry: Arc, - config: &Config, -) -> Result { - let eth_rpc_metrics = Arc::new(ProviderEthRpcMetrics::new(registry)); - let mut parsed_networks = EthereumNetworks::new(); - for (name, chain) in &config.chains.chains { - if chain.protocol != BlockchainKind::Ethereum { - continue; - } - - for provider in &chain.providers { - if let ProviderDetails::Web3(web3) = &provider.details { - let capabilities = web3.node_capabilities(); - - let logger = logger.new(o!("provider" => provider.label.clone())); - info!( - logger, - "Creating transport"; - "url" => &web3.url, - "capabilities" => capabilities - ); - - use crate::config::Transport::*; - - let transport = match web3.transport { - Rpc => Transport::new_rpc(Url::parse(&web3.url)?, web3.headers.clone()), - Ipc => Transport::new_ipc(&web3.url).await, - Ws => Transport::new_ws(&web3.url).await, - }; - - let supports_eip_1898 = !web3.features.contains("no_eip1898"); - - parsed_networks.insert( - name.to_string(), - capabilities, - Arc::new( - graph_chain_ethereum::EthereumAdapter::new( - logger, - provider.label.clone(), - &web3.url, - transport, - eth_rpc_metrics.clone(), - supports_eip_1898, - ) - .await, - ), - web3.limit_for(&config.node), - ); - } - } - } - parsed_networks.sort(); - Ok(parsed_networks) -} - pub fn create_substreams_networks( logger: Logger, config: &Config, @@ -182,19 +124,23 @@ pub fn create_substreams_networks( "provider" => &provider.label, ); - let endpoint = FirehoseEndpoint::new( - &provider.label, - &firehose.url, - firehose.token.clone(), - firehose.filters_enabled(), - firehose.compression_enabled(), - firehose.conn_pool_size, - ); - let parsed_networks = networks_by_kind .entry(chain.protocol) .or_insert_with(|| FirehoseNetworks::new()); - parsed_networks.insert(name.to_string(), Arc::new(endpoint)); + + for i in 0..firehose.conn_pool_size { + parsed_networks.insert( + name.to_string(), + Arc::new(FirehoseEndpoint::new( + &format!("{}-{}", provider.label, i), + &firehose.url, + firehose.token.clone(), + firehose.filters_enabled(), + firehose.compression_enabled(), + SubgraphLimit::Unlimited, + )), + ); + } } } } @@ -223,20 +169,35 @@ pub fn create_firehose_networks( "Configuring firehose endpoint"; "provider" => &provider.label, ); - - let endpoint = FirehoseEndpoint::new( - &provider.label, - &firehose.url, - firehose.token.clone(), - firehose.filters_enabled(), - firehose.compression_enabled(), - firehose.conn_pool_size, - ); + let subgraph_limit = match firehose.limit_for(&config.node) { + Some(limit) if limit == 0 => SubgraphLimit::Unlimited, + Some(limit) => SubgraphLimit::Limit(limit), + None => SubgraphLimit::NoTraffic, + }; let parsed_networks = networks_by_kind .entry(chain.protocol) .or_insert_with(|| FirehoseNetworks::new()); - parsed_networks.insert(name.to_string(), Arc::new(endpoint)); + + // Create n FirehoseEndpoints where n is the size of the pool. If a + // subgraph limit is defined for this endpoint then each endpoint + // instance will have its own subgraph limit. + // e.g.: pool_size = 3 and sg_limit = 2 will result in 3 separate instances + // of FirehoseEndpoint and each of those instances can be used in 2 different + // SubgraphInstances.
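To make the arithmetic in the comment above concrete, here is a toy, standalone sketch (not graph-node code) of how `conn_pool_size` combines with the `SubgraphLimit` mapping introduced in this hunk:

```rust
/// Illustrative only: mirrors the mapping above, where a configured limit
/// of 0 maps to `Unlimited` and the absence of a rule means "no traffic".
enum SubgraphLimit {
    Unlimited,
    Limit(usize),
    NoTraffic,
}

/// Upper bound on subgraphs served across the whole endpoint pool;
/// `None` means there is no cap.
fn max_subgraphs(conn_pool_size: usize, limit: &SubgraphLimit) -> Option<usize> {
    match limit {
        SubgraphLimit::Unlimited => None,
        SubgraphLimit::Limit(n) => Some(conn_pool_size * n),
        SubgraphLimit::NoTraffic => Some(0),
    }
}

fn main() {
    // The comment's example: pool_size = 3, sg_limit = 2 gives three endpoint
    // instances, each shared by at most two subgraphs, i.e. six in total.
    assert_eq!(max_subgraphs(3, &SubgraphLimit::Limit(2)), Some(6));
    assert_eq!(max_subgraphs(3, &SubgraphLimit::Unlimited), None);
    assert_eq!(max_subgraphs(3, &SubgraphLimit::NoTraffic), Some(0));
}
```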
+ for i in 0..firehose.conn_pool_size { + parsed_networks.insert( + name.to_string(), + Arc::new(FirehoseEndpoint::new( + &format!("{}-{}", provider.label, i), + &firehose.url, + firehose.token.clone(), + firehose.filters_enabled(), + firehose.compression_enabled(), + subgraph_limit.clone(), + )), + ); + } } } } @@ -421,9 +382,103 @@ where (firehose_networks, idents) } +/// Parses all Ethereum connection strings and returns their network names and +/// `EthereumAdapter`. +pub async fn create_all_ethereum_networks( + logger: Logger, + registry: Arc, + config: &Config, +) -> anyhow::Result { + let eth_rpc_metrics = Arc::new(ProviderEthRpcMetrics::new(registry)); + let eth_networks_futures = config + .chains + .chains + .iter() + .filter(|(_, chain)| chain.protocol == BlockchainKind::Ethereum) + .map(|(name, _)| { + create_ethereum_networks_for_chain(&logger, eth_rpc_metrics.clone(), config, name) + }); + + Ok(try_join_all(eth_networks_futures) + .await? + .into_iter() + .reduce(|mut a, b| { + a.extend(b); + a + }) + .unwrap_or_else(|| EthereumNetworks::new())) +} + +/// Parses a single Ethereum connection string and returns its network name and `EthereumAdapter`. +pub async fn create_ethereum_networks_for_chain( + logger: &Logger, + eth_rpc_metrics: Arc, + config: &Config, + network_name: &str, +) -> anyhow::Result { + let mut parsed_networks = EthereumNetworks::new(); + let chain = config + .chains + .chains + .get(network_name) + .ok_or_else(|| anyhow!("unknown network {}", network_name))?; + + for provider in &chain.providers { + let (web3, call_only) = match &provider.details { + ProviderDetails::Web3Call(web3) => (web3, true), + ProviderDetails::Web3(web3) => (web3, false), + _ => continue, + }; + + let capabilities = web3.node_capabilities(); + if call_only && !capabilities.archive { + bail!("Ethereum call-only adapters require archive features to be enabled"); + } + + let logger = logger.new(o!("provider" => provider.label.clone())); + info!( + logger, + "Creating transport"; + "url" => &web3.url, + "capabilities" => capabilities + ); + + use crate::config::Transport::*; + + let transport = match web3.transport { + Rpc => Transport::new_rpc(Url::parse(&web3.url)?, web3.headers.clone()), + Ipc => Transport::new_ipc(&web3.url).await, + Ws => Transport::new_ws(&web3.url).await, + }; + + let supports_eip_1898 = !web3.features.contains("no_eip1898"); + + parsed_networks.insert( + network_name.to_string(), + capabilities, + Arc::new( + graph_chain_ethereum::EthereumAdapter::new( + logger, + provider.label.clone(), + &web3.url, + transport, + eth_rpc_metrics.clone(), + supports_eip_1898, + call_only, + ) + .await, + ), + web3.limit_for(&config.node), + ); + } + + parsed_networks.sort(); + Ok(parsed_networks) +} + #[cfg(test)] mod test { - use crate::chain::create_ethereum_networks; + use crate::chain::create_all_ethereum_networks; use crate::config::{Config, Opt}; use graph::log::logger; use graph::prelude::tokio; @@ -462,7 +517,7 @@ mod test { prometheus_registry.clone(), )); - let ethereum_networks = create_ethereum_networks(logger, metrics_registry, &config) + let ethereum_networks = create_all_ethereum_networks(logger, metrics_registry, &config) .await .expect("Correctly parse Ethereum network args"); let mut network_names = ethereum_networks.networks.keys().collect::>(); @@ -476,6 +531,7 @@ mod test { archive: true, traces: false, }; + let has_mainnet_with_traces = ethereum_networks .adapter_with_capabilities("mainnet".to_string(), &traces) .is_ok(); diff --git a/node/src/config.rs 
b/node/src/config.rs index 067b2dc9424..228d3297844 100644 --- a/node/src/config.rs +++ b/node/src/config.rs @@ -1,9 +1,11 @@ use graph::{ anyhow::Error, blockchain::BlockchainKind, + firehose::SUBGRAPHS_PER_CONN, prelude::{ anyhow::{anyhow, bail, Context, Result}, info, + regex::Regex, serde::{ de::{self, value, SeqAccess, Visitor}, Deserialize, Deserializer, Serialize, @@ -15,7 +17,6 @@ use graph_chain_ethereum::{self as ethereum, NodeCapabilities}; use graph_store_postgres::{DeploymentPlacer, Shard as ShardName, PRIMARY_SHARD}; use http::{HeaderMap, Uri}; -use regex::Regex; use std::fs::read_to_string; use std::{ collections::{BTreeMap, BTreeSet}, @@ -547,6 +548,7 @@ pub enum ProviderDetails { Firehose(FirehoseProvider), Web3(Web3Provider), Substreams(FirehoseProvider), + Web3Call(Web3Provider), } const FIREHOSE_FILTER_FEATURE: &str = "filters"; @@ -554,21 +556,26 @@ const FIREHOSE_COMPRESSION_FEATURE: &str = "compression"; const FIREHOSE_PROVIDER_FEATURES: [&str; 2] = [FIREHOSE_FILTER_FEATURE, FIREHOSE_COMPRESSION_FEATURE]; -fn ten() -> u16 { - 10 +fn twenty() -> u16 { + 20 } #[derive(Clone, Debug, Deserialize, Serialize, PartialEq)] pub struct FirehoseProvider { pub url: String, pub token: Option, - #[serde(default = "ten")] + #[serde(default = "twenty")] pub conn_pool_size: u16, #[serde(default)] pub features: BTreeSet, + #[serde(default, rename = "match")] + rules: Vec, } impl FirehoseProvider { + pub fn limit_for(&self, node: &NodeId) -> Option { + self.rules.iter().find_map(|r| r.limit_for(node)) + } pub fn filters_enabled(&self) -> bool { self.features.contains(FIREHOSE_FILTER_FEATURE) } @@ -672,9 +679,16 @@ impl Provider { FIREHOSE_PROVIDER_FEATURES )); } + + if firehose.rules.iter().any(|r| r.limit > SUBGRAPHS_PER_CONN) { + bail!( + "per node subgraph limit for firehose/substreams has to be in the range 0-{}", + SUBGRAPHS_PER_CONN + ); + } } - ProviderDetails::Web3(ref mut web3) => { + ProviderDetails::Web3Call(ref mut web3) | ProviderDetails::Web3(ref mut web3) => { for feature in &web3.features { if !PROVIDER_FEATURES.contains(&feature.as_str()) { return Err(anyhow!( @@ -779,7 +793,7 @@ impl<'de> Deserialize<'de> for Provider { let label = label.ok_or_else(|| serde::de::Error::missing_field("label"))?; let details = match details { - Some(v) => { + Some(mut v) => { if url.is_some() || transport.is_some() || features.is_some() @@ -788,6 +802,14 @@ impl<'de> Deserialize<'de> for Provider { return Err(serde::de::Error::custom("when `details` field is provided, deprecated `url`, `transport`, `features` and `headers` cannot be specified")); } + match v { + ProviderDetails::Firehose(ref mut firehose) + | ProviderDetails::Substreams(ref mut firehose) => { + firehose.rules = nodes + } + _ => {} + } + v } None => ProviderDetails::Web3(Web3Provider { @@ -1097,10 +1119,13 @@ where #[cfg(test)] mod tests { + use crate::config::Web3Rule; + use super::{ Chain, Config, FirehoseProvider, Provider, ProviderDetails, Transport, Web3Provider, }; use graph::blockchain::BlockchainKind; + use graph::prelude::regex::Regex; use graph::prelude::NodeId; use http::{HeaderMap, HeaderValue}; use std::collections::BTreeSet; @@ -1228,10 +1253,8 @@ mod tests { ); assert_eq!(true, actual.is_err()); - assert_eq!( - actual.unwrap_err().to_string(), - "missing field `url` at line 1 column 1" - ); + let err_str = actual.unwrap_err().to_string(); + assert_eq!(err_str.contains("missing field `url`"), true, "{}", err_str); } #[test] @@ -1245,9 +1268,12 @@ mod tests { ); assert_eq!(true, actual.is_err()); + let 
err_str = actual.unwrap_err().to_string(); assert_eq!( - actual.unwrap_err().to_string(), - "missing field `features` at line 1 column 1" + err_str.contains("missing field `features`"), + true, + "{}", + err_str ); } @@ -1318,7 +1344,8 @@ mod tests { ); assert_eq!(true, actual.is_err()); - assert_eq!(actual.unwrap_err().to_string(), "when `details` field is provided, deprecated `url`, `transport`, `features` and `headers` cannot be specified at line 1 column 1"); + let err_str = actual.unwrap_err().to_string(); + assert_eq!(err_str.contains("when `details` field is provided, deprecated `url`, `transport`, `features` and `headers` cannot be specified"),true, "{}", err_str); } #[test] @@ -1338,7 +1365,8 @@ mod tests { url: "http://localhost:9000".to_owned(), token: None, features: BTreeSet::new(), - conn_pool_size: 10, + conn_pool_size: 20, + rules: vec![], }), }, actual @@ -1362,7 +1390,8 @@ mod tests { url: "http://localhost:9000".to_owned(), token: None, features: BTreeSet::new(), - conn_pool_size: 10, + conn_pool_size: 20, + rules: vec![], }), }, actual @@ -1370,7 +1399,7 @@ mod tests { } #[test] fn it_works_on_new_firehose_provider_from_toml_no_features() { - let actual = toml::from_str( + let mut actual = toml::from_str( r#" label = "firehose" details = { type = "firehose", url = "http://localhost:9000" } @@ -1385,11 +1414,165 @@ mod tests { url: "http://localhost:9000".to_owned(), token: None, features: BTreeSet::new(), - conn_pool_size: 10, + conn_pool_size: 20, + rules: vec![], + }), + }, + actual + ); + assert! {actual.validate().is_ok()}; + } + + #[test] + fn it_works_on_new_firehose_provider_with_doc_example_match() { + let mut actual = toml::from_str( + r#" + label = "firehose" + details = { type = "firehose", url = "http://localhost:9000" } + match = [ + { name = "some_node_.*", limit = 10 }, + { name = "other_node_.*", limit = 0 } ] + "#, + ) + .unwrap(); + + assert_eq!( + Provider { + label: "firehose".to_owned(), + details: ProviderDetails::Firehose(FirehoseProvider { + url: "http://localhost:9000".to_owned(), + token: None, + features: BTreeSet::new(), + conn_pool_size: 20, + rules: vec![ + Web3Rule { + name: Regex::new("some_node_.*").unwrap(), + limit: 10, + }, + Web3Rule { + name: Regex::new("other_node_.*").unwrap(), + limit: 0, + } + ], + }), + }, + actual + ); + assert! { actual.validate().is_ok()}; + } + + #[test] + fn it_errors_on_firehose_provider_with_high_limit() { + let mut actual = toml::from_str( + r#" + label = "substreams" + details = { type = "substreams", url = "http://localhost:9000" } + match = [ + { name = "some_node_.*", limit = 101 }, + { name = "other_node_.*", limit = 0 } ] + "#, + ) + .unwrap(); + + assert_eq!( + Provider { + label: "substreams".to_owned(), + details: ProviderDetails::Substreams(FirehoseProvider { + url: "http://localhost:9000".to_owned(), + token: None, + features: BTreeSet::new(), + conn_pool_size: 20, + rules: vec![ + Web3Rule { + name: Regex::new("some_node_.*").unwrap(), + limit: 101, + }, + Web3Rule { + name: Regex::new("other_node_.*").unwrap(), + limit: 0, + } + ], + }), + }, + actual + ); + assert! 
{ actual.validate().is_err()}; + } + + #[test] + fn it_works_on_new_substreams_provider_with_doc_example_match() { + let mut actual = toml::from_str( + r#" + label = "substreams" + details = { type = "substreams", url = "http://localhost:9000" } + match = [ + { name = "some_node_.*", limit = 10 }, + { name = "other_node_.*", limit = 0 } ] + "#, + ) + .unwrap(); + + assert_eq!( + Provider { + label: "substreams".to_owned(), + details: ProviderDetails::Substreams(FirehoseProvider { + url: "http://localhost:9000".to_owned(), + token: None, + features: BTreeSet::new(), + conn_pool_size: 20, + rules: vec![ + Web3Rule { + name: Regex::new("some_node_.*").unwrap(), + limit: 10, + }, + Web3Rule { + name: Regex::new("other_node_.*").unwrap(), + limit: 0, + } + ], }), }, actual ); + assert! { actual.validate().is_ok()}; + } + + #[test] + fn it_errors_on_substreams_provider_with_high_limit() { + let mut actual = toml::from_str( + r#" + label = "substreams" + details = { type = "substreams", url = "http://localhost:9000" } + match = [ + { name = "some_node_.*", limit = 101 }, + { name = "other_node_.*", limit = 0 } ] + "#, + ) + .unwrap(); + + assert_eq!( + Provider { + label: "substreams".to_owned(), + details: ProviderDetails::Substreams(FirehoseProvider { + url: "http://localhost:9000".to_owned(), + token: None, + features: BTreeSet::new(), + conn_pool_size: 20, + rules: vec![ + Web3Rule { + name: Regex::new("some_node_.*").unwrap(), + limit: 101, + }, + Web3Rule { + name: Regex::new("other_node_.*").unwrap(), + limit: 0, + } + ], + }), + }, + actual + ); + assert! { actual.validate().is_err()}; } #[test] @@ -1441,4 +1624,29 @@ mod tests { read_to_string(&d).expect(&format!("resource {:?} not found", &d)) } + + #[test] + fn it_works_on_web3call_provider_without_transport_from_toml() { + let actual = toml::from_str( + r#" + label = "peering" + details = { type = "web3call", url = "http://localhost:8545", features = [] } + "#, + ) + .unwrap(); + + assert_eq!( + Provider { + label: "peering".to_owned(), + details: ProviderDetails::Web3Call(Web3Provider { + transport: Transport::Rpc, + url: "http://localhost:8545".to_owned(), + features: BTreeSet::new(), + headers: HeaderMap::new(), + rules: Vec::new(), + }), + }, + actual + ); + } } diff --git a/node/src/main.rs b/node/src/main.rs index 098dd1a4e36..243cb78b0e9 100644 --- a/node/src/main.rs +++ b/node/src/main.rs @@ -1,16 +1,18 @@ -use ethereum::chain::{EthereumAdapterSelector, EthereumStreamBuilder}; +use clap::Parser as _; +use ethereum::chain::{EthereumAdapterSelector, EthereumBlockRefetcher, EthereumStreamBuilder}; +use ethereum::codec::HeaderOnlyBlock; use ethereum::{ BlockIngestor as EthereumBlockIngestor, EthereumAdapterTrait, EthereumNetworks, RuntimeAdapter, }; use git_testament::{git_testament, render_testament}; -use graph::blockchain::firehose_block_ingestor::FirehoseBlockIngestor; +use graph::blockchain::firehose_block_ingestor::{FirehoseBlockIngestor, Transforms}; use graph::blockchain::{Block as BlockchainBlock, Blockchain, BlockchainKind, BlockchainMap}; use graph::components::store::BlockStore; use graph::data::graphql::effort::LoadManager; use graph::env::EnvVars; use graph::firehose::{FirehoseEndpoints, FirehoseNetworks}; use graph::log::logger; -use graph::prelude::{IndexNodeServer as _, JsonRpcServer as _, *}; +use graph::prelude::{IndexNodeServer as _, *}; use graph::prometheus::Registry; use graph::url::Url; use graph_chain_arweave::{self as arweave, Block as ArweaveBlock}; @@ -18,14 +20,14 @@ use graph_chain_cosmos::{self as 
cosmos, Block as CosmosFirehoseBlock}; use graph_chain_ethereum as ethereum; use graph_chain_near::{self as near, HeaderOnlyBlock as NearFirehoseHeaderOnlyBlock}; use graph_chain_substreams as substreams; -use graph_core::polling_monitor::ipfs_service::IpfsService; +use graph_core::polling_monitor::ipfs_service; use graph_core::{ LinkResolver, MetricsRegistry, SubgraphAssignmentProvider as IpfsSubgraphAssignmentProvider, SubgraphInstanceManager, SubgraphRegistrar as IpfsSubgraphRegistrar, }; use graph_graphql::prelude::GraphQlRunner; use graph_node::chain::{ - connect_ethereum_networks, connect_firehose_networks, create_ethereum_networks, + connect_ethereum_networks, connect_firehose_networks, create_all_ethereum_networks, create_firehose_networks, create_ipfs_clients, create_substreams_networks, }; use graph_node::config::Config; @@ -44,7 +46,6 @@ use std::path::Path; use std::sync::atomic; use std::time::Duration; use std::{collections::HashMap, env}; -use structopt::StructOpt; use tokio::sync::mpsc; git_testament!(TESTAMENT); @@ -93,7 +94,8 @@ fn read_expensive_queries( async fn main() { env_logger::init(); - let opt = opt::Opt::from_args(); + let env_vars = Arc::new(EnvVars::from_env().unwrap()); + let opt = opt::Opt::parse(); // Set up logger let logger = logger(opt.debug); @@ -189,32 +191,30 @@ async fn main() { client: reqwest::Client::new(), }); + // Set up Prometheus registry + let prometheus_registry = Arc::new(Registry::new()); + let metrics_registry = Arc::new(MetricsRegistry::new( + logger.clone(), + prometheus_registry.clone(), + )); + // Create a component and subgraph logger factory - let logger_factory = LoggerFactory::new(logger.clone(), elastic_config); + let logger_factory = + LoggerFactory::new(logger.clone(), elastic_config, metrics_registry.clone()); // Try to create IPFS clients for each URL specified in `--ipfs` let ipfs_clients: Vec<_> = create_ipfs_clients(&logger, &opt.ipfs); let ipfs_client = ipfs_clients.first().cloned().expect("Missing IPFS client"); - let ipfs_service = IpfsService::new( + let ipfs_service = ipfs_service( ipfs_client, ENV_VARS.mappings.max_ipfs_file_bytes as u64, ENV_VARS.mappings.ipfs_timeout, - ENV_VARS.mappings.max_ipfs_concurrent_requests, + ENV_VARS.mappings.ipfs_request_limit, ); // Convert the clients into a link resolver. 
Since we want to get past // possible temporary DNS failures, make the resolver retry - let link_resolver = Arc::new(LinkResolver::new( - ipfs_clients, - Arc::new(EnvVars::default()), - )); - - // Set up Prometheus registry - let prometheus_registry = Arc::new(Registry::new()); - let metrics_registry = Arc::new(MetricsRegistry::new( - logger.clone(), - prometheus_registry.clone(), - )); + let link_resolver = Arc::new(LinkResolver::new(ipfs_clients, env_vars.cheap_clone())); let mut metrics_server = PrometheusMetricsServer::new(&logger_factory, prometheus_registry.clone()); @@ -223,7 +223,7 @@ async fn main() { let eth_networks = if query_only { EthereumNetworks::new() } else { - create_ethereum_networks(logger.clone(), metrics_registry.clone(), &config) + create_all_ethereum_networks(logger.clone(), metrics_registry.clone(), &config) .await .expect("Failed to parse Ethereum networks") }; @@ -257,7 +257,7 @@ async fn main() { ) .await; - let launch_services = |logger: Logger| async move { + let launch_services = |logger: Logger, env_vars: Arc| async move { let subscription_manager = store_builder.subscription_manager(); let chain_head_update_listener = store_builder.chain_head_update_listener(); let primary_pool = store_builder.primary_pool(); @@ -375,13 +375,47 @@ async fn main() { if !opt.disable_block_ingestor { if ethereum_chains.len() > 0 { let block_polling_interval = Duration::from_millis(opt.ethereum_polling_interval); + // Each chain contains both the rpc and firehose endpoints; provided + // IS_FIREHOSE_PREFERRED is set to true, a chain will use firehose if it has + // endpoints set. Chains are essentially guaranteed to use EITHER firehose or RPC, + // but will never start both. + let (firehose_eth_chains, polling_eth_chains): (HashMap<_, _>, HashMap<_, _>) = + ethereum_chains + .into_iter() + .partition(|(_, chain)| chain.is_firehose_supported()); start_block_ingestor( &logger, &logger_factory, block_polling_interval, - ethereum_chains, + polling_eth_chains, ); + + firehose_networks_by_kind + .get(&BlockchainKind::Ethereum) + .map(|eth_firehose_endpoints| { + start_firehose_block_ingestor::<_, HeaderOnlyBlock>( + &logger, + &network_store, + firehose_eth_chains + .into_iter() + .map(|(name, chain)| { + let firehose_endpoints = eth_firehose_endpoints + .networks + .get(&name) + .expect(&format!("chain {} to have endpoints", name)) + .clone(); + ( + name, + FirehoseChain { + chain, + firehose_endpoints, + }, + ) + }) + .collect(), + ) + }); } start_firehose_block_ingestor::<_, ArweaveBlock>( @@ -415,6 +449,7 @@ async fn main() { let subgraph_instance_manager = SubgraphInstanceManager::new( &logger_factory, + env_vars.cheap_clone(), network_store.subgraph_store(), blockchain_map.cheap_clone(), metrics_registry.clone(), @@ -460,6 +495,7 @@ async fn main() { node_id.clone(), logger.clone(), ) + .await .expect("failed to start JSON-RPC admin server"); // Let the server run forever. @@ -533,15 +569,15 @@ async fn main() { .compat(), ); - graph::spawn( + graph::spawn(async move { metrics_server .serve(metrics_port) + .await .expect("Failed to start metrics server") - .compat(), - ); + }); }; - graph::spawn(launch_services(logger.clone())); + graph::spawn(launch_services(logger.clone(), env_vars.cheap_clone())); // Periodically check for contention in the tokio threadpool. First spawn a // task that simply responds to "ping" requests.
Then spawn a separate @@ -694,6 +730,7 @@ fn ethereum_networks_as_chains( eth_adapters.clone(), chain_head_update_listener.clone(), Arc::new(EthereumStreamBuilder {}), + Arc::new(EthereumBlockRefetcher {}), Arc::new(adapter_selector), runtime_adapter, ethereum::ENV_VARS.reorg_threshold, @@ -934,12 +971,16 @@ fn start_firehose_block_ingestor( match store.block_store().chain_store(network_name.as_ref()) { Some(s) => { - let block_ingestor = FirehoseBlockIngestor::::new( + let mut block_ingestor = FirehoseBlockIngestor::::new( s, endpoint.clone(), logger.new(o!("component" => "FirehoseBlockIngestor", "provider" => endpoint.provider.clone())), ); + if C::KIND == BlockchainKind::Ethereum { + block_ingestor = block_ingestor.with_transforms(vec![Transforms::EthereumHeaderOnly]); + } + // Run the Firehose block ingestor in the background graph::spawn(block_ingestor.run()); }, diff --git a/node/src/manager/color.rs b/node/src/manager/color.rs new file mode 100644 index 00000000000..3b1f4dfe4fa --- /dev/null +++ b/node/src/manager/color.rs @@ -0,0 +1,80 @@ +use std::sync::Mutex; +use termcolor::{Color, ColorChoice, ColorSpec, StandardStream, WriteColor}; + +use graph::prelude::{isatty, lazy_static}; + +use super::CmdResult; + +lazy_static! { + static ref COLOR_MODE: Mutex = Mutex::new(ColorChoice::Auto); +} + +/// A helper to generate colored terminal output +pub struct Terminal { + out: StandardStream, + spec: ColorSpec, +} + +impl Terminal { + pub fn set_color_preference(pref: &str) { + let choice = match pref { + "always" => ColorChoice::Always, + "ansi" => ColorChoice::AlwaysAnsi, + "auto" => { + if isatty::stdout_isatty() { + ColorChoice::Auto + } else { + ColorChoice::Never + } + } + _ => ColorChoice::Never, + }; + *COLOR_MODE.lock().unwrap() = choice; + } + + fn color_preference() -> ColorChoice { + *COLOR_MODE.lock().unwrap() + } + + pub fn new() -> Self { + Self { + out: StandardStream::stdout(Self::color_preference()), + spec: ColorSpec::new(), + } + } + + pub fn green(&mut self) -> CmdResult { + self.spec.set_fg(Some(Color::Green)); + self.out.set_color(&self.spec).map_err(Into::into) + } + + pub fn blue(&mut self) -> CmdResult { + self.spec.set_fg(Some(Color::Blue)); + self.out.set_color(&self.spec).map_err(Into::into) + } + + pub fn dim(&mut self) -> CmdResult { + self.spec.set_dimmed(true); + self.out.set_color(&self.spec).map_err(Into::into) + } + + pub fn bold(&mut self) -> CmdResult { + self.spec.set_bold(true); + self.out.set_color(&self.spec).map_err(Into::into) + } + + pub fn reset(&mut self) -> CmdResult { + self.spec = ColorSpec::new(); + self.out.reset().map_err(Into::into) + } +} + +impl std::io::Write for Terminal { + fn write(&mut self, buf: &[u8]) -> std::io::Result { + self.out.write(buf) + } + + fn flush(&mut self) -> std::io::Result<()> { + self.out.flush() + } +} diff --git a/node/src/manager/commands/chain.rs b/node/src/manager/commands/chain.rs index 764ae06e495..dfa0ab15a6b 100644 --- a/node/src/manager/commands/chain.rs +++ b/node/src/manager/commands/chain.rs @@ -11,6 +11,7 @@ use graph::{ components::store::BlockStore as _, prelude::anyhow::Error, prelude::serde_json as json, }; use graph_store_postgres::BlockStore; +use graph_store_postgres::ChainStore; use graph_store_postgres::{ command_support::catalog::block_store, connection_pool::ConnectionPool, }; @@ -49,6 +50,16 @@ pub async fn list(primary: ConnectionPool, store: Arc) -> Result<(), Ok(()) } +pub async fn clear_call_cache( + chain_store: Arc, + from: Option, + to: Option, +) -> Result<(), Error> { + 
chain_store.clear_call_cache(from, to).await?; + println!("The call cache has been cleared"); + Ok(()) +} + pub async fn info( primary: ConnectionPool, store: Arc, diff --git a/node/src/manager/commands/check_blocks.rs b/node/src/manager/commands/check_blocks.rs index e5d2c0c3836..5d2f65714c3 100644 --- a/node/src/manager/commands/check_blocks.rs +++ b/node/src/manager/commands/check_blocks.rs @@ -1,3 +1,4 @@ +use crate::manager::prompt::prompt_for_confirmation; use graph::{ anyhow::{bail, ensure}, components::store::ChainStore as ChainStoreTrait, @@ -26,9 +27,18 @@ pub async fn by_number( chain_store: Arc, ethereum_adapter: &EthereumAdapter, logger: &Logger, + delete_duplicates: bool, ) -> anyhow::Result<()> { - let block_hash = steps::resolve_block_hash_from_block_number(number, &chain_store)?; - run(&block_hash, &chain_store, ethereum_adapter, logger).await + let block_hashes = steps::resolve_block_hash_from_block_number(number, &chain_store)?; + + match &block_hashes.as_slice() { + [] => bail!("Could not find a block with number {} in store", number), + [block_hash] => run(block_hash, &chain_store, ethereum_adapter, logger).await, + &block_hashes => { + handle_multiple_block_hashes(number, block_hashes, &chain_store, delete_duplicates) + .await + } + } } pub async fn by_range( @@ -37,6 +47,7 @@ pub async fn by_range( range_from: Option, range_to: Option, logger: &Logger, + delete_duplicates: bool, ) -> anyhow::Result<()> { // Resolve a range of block numbers into a collection of block hashes let range = ranges::Range::new(range_from, range_to)?; @@ -48,15 +59,31 @@ pub async fn by_range( // FIXME: This performs poorly. // TODO: This could be turned into async code for block_number in range.lower_bound..=max { - println!("Fixing block [{block_number}/{max}]"); - let block_hash = steps::resolve_block_hash_from_block_number(block_number, &chain_store)?; - run(&block_hash, &chain_store, ethereum_adapter, logger).await? + println!("Checking block [{block_number}/{max}]"); + let block_hashes = steps::resolve_block_hash_from_block_number(block_number, &chain_store)?; + match &block_hashes.as_slice() { + [] => eprintln!("Found no block hash with number {block_number}"), + [block_hash] => run(block_hash, &chain_store, ethereum_adapter, logger).await?, + &block_hashes => { + handle_multiple_block_hashes( + block_number, + block_hashes, + &chain_store, + delete_duplicates, + ) + .await? + } + } } Ok(()) } pub fn truncate(chain_store: Arc, skip_confirmation: bool) -> anyhow::Result<()> { - if !skip_confirmation && !helpers::prompt_for_confirmation()? { + let prompt = format!( + "This will delete all cached blocks for {}.\nProceed?", + chain_store.chain + ); + if !skip_confirmation && !prompt_for_confirmation(&prompt)?
{ println!("Aborting."); return Ok(()); } @@ -72,34 +99,71 @@ async fn run( ethereum_adapter: &EthereumAdapter, logger: &Logger, ) -> anyhow::Result<()> { - let cached_block = steps::fetch_single_cached_block(*block_hash, &chain_store)?; + let cached_block = steps::fetch_single_cached_block(*block_hash, chain_store)?; let provider_block = - steps::fetch_single_provider_block(&block_hash, ethereum_adapter, logger).await?; + steps::fetch_single_provider_block(block_hash, ethereum_adapter, logger).await?; let diff = steps::diff_block_pair(&cached_block, &provider_block); - steps::report_difference(diff.as_deref(), &block_hash); + steps::report_difference(diff.as_deref(), block_hash); if diff.is_some() { - steps::delete_block(&block_hash, &chain_store)?; + steps::delete_block(block_hash, chain_store)?; + } + Ok(()) +} + +async fn handle_multiple_block_hashes( + block_number: i32, + block_hashes: &[H256], + chain_store: &ChainStore, + delete_duplicates: bool, +) -> anyhow::Result<()> { + println!( + "graphman found {} different block hashes for block number {} in the store \ + and is unable to tell which one to check:", + block_hashes.len(), + block_number + ); + for (num, hash) in block_hashes.iter().enumerate() { + println!("{:>4}: {hash:?}", num + 1); + } + if delete_duplicates { + println!("Deleting duplicated blocks..."); + for hash in block_hashes { + steps::delete_block(hash, chain_store)?; + } + } else { + eprintln!( + "Operation aborted for block number {block_number}.\n\ + To delete the duplicated blocks and continue this operation, rerun this command with \ + the `--delete-duplicates` option." + ) } Ok(()) } mod steps { use super::*; + use futures::compat::Future01CompatExt; - use graph::prelude::serde_json::{self, Value}; + use graph::{ + anyhow::bail, + prelude::serde_json::{self, Value}, + }; use json_structural_diff::{colorize as diff_to_string, JsonDiff}; /// Queries the [`ChainStore`] about the block hash for the given block number. /// - /// Errors on a non-unary result. + /// Multiple block hashes can be returned as the store does not enforce uniqueness based on + /// block numbers. + /// Returns an empty vector if no block hash is found. pub(super) fn resolve_block_hash_from_block_number( number: i32, chain_store: &ChainStore, - ) -> anyhow::Result { + ) -> anyhow::Result> { let block_hashes = chain_store.block_hashes_by_block_number(number)?; - let hash = helpers::get_single_item("block hash", block_hashes) - .with_context(|| format!("Failed to locate block number {} in store", number))?; - Ok(H256(hash.as_slice().try_into()?)) + Ok(block_hashes + .into_iter() + .map(|x| H256::from_slice(&x.as_slice()[..32])) + .collect()) } /// Queries the [`ChainStore`] for a cached block given a block hash. @@ -110,27 +174,30 @@ mod steps { chain_store: &ChainStore, ) -> anyhow::Result { let blocks = chain_store.blocks(&[block_hash.into()])?; - if blocks.is_empty() { - bail!("Could not find a block with hash={block_hash:?} in cache") - } - helpers::get_single_item("block", blocks) - .with_context(|| format!("Failed to locate block {} in store.", block_hash)) + match blocks.len() { + 0 => bail!("Failed to locate block with hash {} in store", block_hash), + 1 => {} + _ => bail!("Found multiple blocks with hash {} in store", block_hash), + }; + // Unwrap: We just checked that the vector has a single element + Ok(blocks.into_iter().next().unwrap()) } /// Fetches a block from a JRPC endpoint. /// - /// Errors on a non-unary result. 
+ /// Errors on provider failure or if the returned block has a different hash than the one + /// requested. pub(super) async fn fetch_single_provider_block( block_hash: &H256, ethereum_adapter: &EthereumAdapter, logger: &Logger, ) -> anyhow::Result { let provider_block = ethereum_adapter - .block_by_hash(&logger, *block_hash) + .block_by_hash(logger, *block_hash) .compat() .await .with_context(|| format!("failed to fetch block {block_hash}"))? - .ok_or_else(|| anyhow!("JRPC provider found no block {block_hash}"))?; + .ok_or_else(|| anyhow!("JRPC provider found no block with hash {block_hash:?}"))?; ensure!( provider_block.hash == Some(*block_hash), "Provider responded with a different block hash" @@ -146,7 +213,7 @@ mod steps { if a == b { None } else { - match JsonDiff::diff(a, &b, false).diff { + match JsonDiff::diff(a, b, false).diff { // The diff could potentially be a `Value::Null`, which is equivalent to not being // different at all. None | Some(Value::Null) => None, @@ -172,7 +239,7 @@ mod steps { /// Attempts to delete a block from the block cache. pub(super) fn delete_block(hash: &H256, chain_store: &ChainStore) -> anyhow::Result<()> { println!("Deleting block {hash} from cache."); - chain_store.delete_blocks(&[&hash])?; + chain_store.delete_blocks(&[hash])?; println!("Done."); Ok(()) } @@ -187,7 +254,6 @@ mod steps { mod helpers { use super::*; use graph::prelude::hex; - use std::io::{self, Write}; /// Tries to parse a [`H256`] from a hex string. pub(super) fn parse_block_hash(hash: &str) -> anyhow::Result { @@ -195,34 +261,6 @@ mod helpers { let hash = hex::decode(hash)?; Ok(H256::from_slice(&hash)) } - - /// Asks users if they are certain about truncating the whole block cache. - pub(super) fn prompt_for_confirmation() -> anyhow::Result { - print!("This will delete all cached blocks.\nProceed? [y/N] "); - io::stdout().flush()?; - - let mut answer = String::new(); - io::stdin().read_line(&mut answer)?; - answer.make_ascii_lowercase(); - - match answer.trim() { - "y" | "yes" => Ok(true), - _ => Ok(false), - } - } - - /// Convenience function for extracting values from unary sets. - pub(super) fn get_single_item(name: &'static str, collection: I) -> anyhow::Result - where - I: IntoIterator, - { - let mut iterator = collection.into_iter(); - match (iterator.next(), iterator.next()) { - (Some(a), None) => Ok(a), - (None, None) => bail!("Expected a single {name} but found none."), - _ => bail!("Expected a single {name} but found multiple occurrences."), - } - } } /// Custom range type diff --git a/node/src/manager/commands/config.rs b/node/src/manager/commands/config.rs index f33552b308c..561a1da013e 100644 --- a/node/src/manager/commands/config.rs +++ b/node/src/manager/commands/config.rs @@ -10,10 +10,10 @@ use graph::{ }, slog::Logger, }; -use graph_chain_ethereum::{EthereumAdapterTrait, NodeCapabilities}; +use graph_chain_ethereum::{EthereumAdapterTrait, NodeCapabilities, ProviderEthRpcMetrics}; use graph_store_postgres::DeploymentPlacer; -use crate::config::Config; +use crate::{chain::create_ethereum_networks_for_chain, config::Config}; pub fn place(placer: &dyn DeploymentPlacer, name: &str, network: &str) -> Result<(), Error> { match placer.place(name, network).map_err(|s| anyhow!(s))? 
{ @@ -54,7 +54,7 @@ pub fn pools(config: &Config, nodes: Vec, shard: bool) -> Result<(), Err let nodes: Vec<_> = nodes .into_iter() .map(|name| { - NodeId::new(name.replace("-", "_")) + NodeId::new(name.replace('-', "_")) .map_err(|()| anyhow!("illegal node name `{}`", name)) }) .collect::>()?; @@ -121,9 +121,9 @@ pub async fn provider( } let caps = caps_from_features(features)?; + let eth_rpc_metrics = Arc::new(ProviderEthRpcMetrics::new(registry)); let networks = - crate::manager::commands::run::create_ethereum_networks(logger, registry, config, &network) - .await?; + create_ethereum_networks_for_chain(&logger, eth_rpc_metrics, config, &network).await?; let adapters = networks .networks .get(&network) diff --git a/node/src/manager/commands/database.rs b/node/src/manager/commands/database.rs new file mode 100644 index 00000000000..17d11c041cf --- /dev/null +++ b/node/src/manager/commands/database.rs @@ -0,0 +1,60 @@ +use std::{io::Write, time::Instant}; + +use graph::prelude::anyhow; +use graph_store_postgres::connection_pool::PoolCoordinator; + +pub async fn remap( + coord: &PoolCoordinator, + src: Option, + dst: Option, + force: bool, +) -> Result<(), anyhow::Error> { + let pools = { + let mut pools = coord.pools(); + pools.sort_by(|pool1, pool2| pool1.shard.as_str().cmp(pool2.shard.as_str())); + pools + }; + let servers = coord.servers(); + + if let Some(src) = &src { + if !servers.iter().any(|srv| srv.shard.as_str() == src) { + return Err(anyhow!("unknown source shard {src}")); + } + } + if let Some(dst) = &dst { + if !pools.iter().any(|pool| pool.shard.as_str() == dst) { + return Err(anyhow!("unknown destination shard {dst}")); + } + } + + let servers = servers.iter().filter(|srv| match &src { + None => true, + Some(src) => srv.shard.as_str() == src, + }); + + for server in servers { + let pools = pools.iter().filter(|pool| match &dst { + None => true, + Some(dst) => pool.shard.as_str() == dst, + }); + + for pool in pools { + let start = Instant::now(); + print!( + "Remapping imports from {} in shard {}", + server.shard, pool.shard + ); + std::io::stdout().flush().ok(); + if let Err(e) = pool.remap(server) { + println!(" FAILED"); + println!(" error: {e}"); + if !force { + return Ok(()); + } + } else { + println!(" (done in {}s)", start.elapsed().as_secs()); + } + } + } + Ok(()) +} diff --git a/node/src/manager/commands/drop.rs b/node/src/manager/commands/drop.rs new file mode 100644 index 00000000000..30d724575c5 --- /dev/null +++ b/node/src/manager/commands/drop.rs @@ -0,0 +1,68 @@ +use crate::manager::{ + deployment::{Deployment, DeploymentSearch}, + display::List, + prompt::prompt_for_confirmation, +}; +use graph::anyhow::{self, bail}; +use graph_store_postgres::{connection_pool::ConnectionPool, NotificationSender, SubgraphStore}; +use std::sync::Arc; + +/// Finds, unassigns, records, and removes matching deployments. +/// +/// Asks for confirmation before removing any data. +/// This is a convenience function that calls a series of other graphman commands.
+pub async fn run( + primary_pool: ConnectionPool, + subgraph_store: Arc, + sender: Arc, + search_term: DeploymentSearch, + current: bool, + pending: bool, + used: bool, + skip_confirmation: bool, +) -> anyhow::Result<()> { + // call `graphman info` to find matching deployments + let deployments = search_term.find(primary_pool.clone(), current, pending, used)?; + if deployments.is_empty() { + bail!("Found no deployment for search_term: {search_term}") + } else { + print_deployments(&deployments); + if !skip_confirmation && !prompt_for_confirmation("\nContinue?")? { + println!("Execution aborted by user"); + return Ok(()); + } + } + // call `graphman unassign` to stop any active deployments + crate::manager::commands::assign::unassign(primary_pool, &sender, &search_term).await?; + + // call `graphman remove` to unregister the subgraph's name + for deployment in &deployments { + crate::manager::commands::remove::run(subgraph_store.clone(), &deployment.name)?; + } + + // call `graphman unused record` to register those deployments unused + crate::manager::commands::unused_deployments::record(subgraph_store.clone())?; + + // call `graphman unused remove` to remove each deployment's data + for deployment in &deployments { + crate::manager::commands::unused_deployments::remove( + subgraph_store.clone(), + 1_000_000, + Some(&deployment.deployment), + None, + )?; + } + Ok(()) +} + +fn print_deployments(deployments: &[Deployment]) { + let mut list = List::new(vec!["name", "deployment"]); + println!("Found {} deployment(s) to remove:", deployments.len()); + for deployment in deployments { + list.append(vec![ + deployment.name.to_string(), + deployment.deployment.to_string(), + ]); + } + list.render(); +} diff --git a/node/src/manager/commands/index.rs b/node/src/manager/commands/index.rs index 2ef9582d1b7..657e92f0047 100644 --- a/node/src/manager/commands/index.rs +++ b/node/src/manager/commands/index.rs @@ -1,6 +1,15 @@ -use crate::manager::deployment::DeploymentSearch; -use graph::prelude::{anyhow, StoreError}; -use graph_store_postgres::{connection_pool::ConnectionPool, SubgraphStore}; +use crate::manager::{color::Terminal, deployment::DeploymentSearch, CmdResult}; +use graph::{ + components::store::DeploymentLocator, + itertools::Itertools, + prelude::{anyhow, StoreError}, +}; +use graph_store_postgres::{ + command_support::index::{CreateIndex, Method}, + connection_pool::ConnectionPool, + SubgraphStore, +}; +use std::io::Write as _; use std::{collections::HashSet, sync::Arc}; fn validate_fields>(fields: &[T]) -> Result<(), anyhow::Error> { @@ -26,13 +35,16 @@ pub async fn create( validate_fields(&field_names)?; let deployment_locator = search.locate_unique(&pool)?; println!("Index creation started. Please wait."); + let index_method = index_method + .parse::() + .map_err(|()| anyhow!("unknown index method `{}`", index_method))?; match store .create_manual_index(&deployment_locator, entity_name, field_names, index_method) .await { Ok(()) => Ok(()), Err(StoreError::Canceled) => { - eprintln!("Index creation attempt faield. Please retry."); + eprintln!("Index creation attempt failed. 
Please retry."); ::std::process::exit(1); } Err(other) => Err(anyhow::anyhow!(other)), @@ -44,13 +56,131 @@ pub async fn list( pool: ConnectionPool, search: DeploymentSearch, entity_name: &str, + no_attribute_indexes: bool, + no_default_indexes: bool, + to_sql: bool, + concurrent: bool, + if_not_exists: bool, ) -> Result<(), anyhow::Error> { + fn header( + term: &mut Terminal, + indexes: &[CreateIndex], + loc: &DeploymentLocator, + entity: &str, + ) -> Result<(), anyhow::Error> { + use CreateIndex::*; + + let index = indexes.iter().find(|index| matches!(index, Parsed { .. })); + match index { + Some(Parsed { nsp, table, .. }) => { + term.bold()?; + writeln!(term, "{:^76}", format!("Indexes for {nsp}.{table}"))?; + term.reset()?; + } + _ => { + writeln!( + term, + "{:^76}", + format!("Indexes for sgd{}.{entity}", loc.id) + )?; + } + } + writeln!(term, "{: ^12} IPFS hash: {}", "", loc.hash)?; + writeln!(term, "{:-^76}", "")?; + Ok(()) + } + + fn footer(term: &mut Terminal) -> Result<(), anyhow::Error> { + writeln!(term, " (a): account-like flag set")?; + Ok(()) + } + + fn print_index(term: &mut Terminal, index: &CreateIndex) -> CmdResult { + use CreateIndex::*; + + match index { + Unknown { defn } => { + writeln!(term, "*unknown*")?; + writeln!(term, " {defn}")?; + } + Parsed { + unique, + name, + nsp: _, + table: _, + method, + columns, + cond, + with, + } => { + let unique = if *unique { " unique" } else { "" }; + let start = format!("{unique} using {method}"); + let columns = columns.into_iter().map(|c| c.to_string()).join(", "); + + term.green()?; + if index.is_default_index() { + term.dim()?; + } else { + term.bold()?; + } + write!(term, "{name}")?; + term.reset()?; + write!(term, "{start}")?; + term.blue()?; + if name.len() + start.len() + columns.len() <= 76 { + writeln!(term, "({columns})")?; + } else { + writeln!(term, "\n on ({})", columns)?; + } + term.reset()?; + if let Some(cond) = cond { + writeln!(term, " where {cond}")?; + } + if let Some(with) = with { + writeln!(term, " with {with}")?; + } + } + } + Ok(()) + } + let deployment_locator = search.locate_unique(&pool)?; - let indexes: Vec = store - .indexes_for_entity(&deployment_locator, entity_name) - .await?; - for index in &indexes { - println!("{index}") + let indexes: Vec<_> = { + let mut indexes = store + .indexes_for_entity(&deployment_locator, entity_name) + .await?; + if no_attribute_indexes { + indexes = indexes + .into_iter() + .filter(|idx| !idx.is_attribute_index()) + .collect(); + } + if no_default_indexes { + indexes = indexes + .into_iter() + .filter(|idx| !idx.is_default_index()) + .collect(); + } + indexes + }; + + let mut term = Terminal::new(); + + if to_sql { + for index in indexes { + writeln!(term, "{};", index.to_sql(concurrent, if_not_exists)?)?; + } + } else { + let mut first = true; + header(&mut term, &indexes, &deployment_locator, entity_name)?; + for index in &indexes { + if first { + first = false; + } else { + writeln!(term, "{:-^76}", "")?; + } + print_index(&mut term, index)?; + } } Ok(()) } diff --git a/node/src/manager/commands/info.rs b/node/src/manager/commands/info.rs index 19994b6ec80..76781d74d57 100644 --- a/node/src/manager/commands/info.rs +++ b/node/src/manager/commands/info.rs @@ -5,31 +5,6 @@ use graph_store_postgres::{connection_pool::ConnectionPool, Store}; use crate::manager::deployment::{Deployment, DeploymentSearch}; -fn find( - pool: ConnectionPool, - search: DeploymentSearch, - current: bool, - pending: bool, - used: bool, -) -> Result, anyhow::Error> { - let current = 
current || used; - let pending = pending || used; - - let deployments = search.lookup(&pool)?; - // Filter by status; if neither `current` or `pending` are set, list - // all deployments - let deployments: Vec<_> = deployments - .into_iter() - .filter(|deployment| match (current, pending) { - (true, false) => deployment.status == "current", - (false, true) => deployment.status == "pending", - (true, true) => deployment.status == "current" || deployment.status == "pending", - (false, false) => true, - }) - .collect(); - Ok(deployments) -} - pub fn run( pool: ConnectionPool, store: Option>, @@ -38,7 +13,7 @@ pub fn run( pending: bool, used: bool, ) -> Result<(), anyhow::Error> { - let deployments = find(pool, search, current, pending, used)?; + let deployments = search.find(pool, current, pending, used)?; let ids: Vec<_> = deployments.iter().map(|d| d.locator().id).collect(); let statuses = match store { Some(store) => store.status(status::Filter::DeploymentIds(ids))?, diff --git a/node/src/manager/commands/mod.rs b/node/src/manager/commands/mod.rs index c3ae31bbb79..de7267da828 100644 --- a/node/src/manager/commands/mod.rs +++ b/node/src/manager/commands/mod.rs @@ -4,9 +4,12 @@ pub mod check_blocks; pub mod config; pub mod copy; pub mod create; +pub mod database; +pub mod drop; pub mod index; pub mod info; pub mod listen; +pub mod prune; pub mod query; pub mod remove; pub mod rewind; diff --git a/node/src/manager/commands/prune.rs b/node/src/manager/commands/prune.rs new file mode 100644 index 00000000000..95676b7c3ee --- /dev/null +++ b/node/src/manager/commands/prune.rs @@ -0,0 +1,192 @@ +use std::{ + collections::HashSet, + io::Write, + sync::Arc, + time::{Duration, Instant}, +}; + +use graph::{ + components::store::{PruneReporter, StatusStore}, + data::subgraph::status, + prelude::{anyhow, BlockNumber}, +}; +use graph_chain_ethereum::ENV_VARS as ETH_ENV; +use graph_store_postgres::{connection_pool::ConnectionPool, Store}; + +use crate::manager::{ + commands::stats::{abbreviate_table_name, show_stats}, + deployment::DeploymentSearch, +}; + +struct Progress { + start: Instant, + analyze_start: Instant, + switch_start: Instant, + table_start: Instant, + final_start: Instant, + nonfinal_start: Instant, +} + +impl Progress { + fn new() -> Self { + Self { + start: Instant::now(), + analyze_start: Instant::now(), + switch_start: Instant::now(), + final_start: Instant::now(), + table_start: Instant::now(), + nonfinal_start: Instant::now(), + } + } +} + +fn print_copy_header() { + println!("{:^30} | {:^10} | {:^11}", "table", "versions", "time"); + println!("{:-^30}-+-{:-^10}-+-{:-^11}", "", "", ""); + std::io::stdout().flush().ok(); +} + +fn print_copy_row(table: &str, total_rows: usize, elapsed: Duration) { + print!( + "\r{:<30} | {:>10} | {:>9}s", + abbreviate_table_name(table, 30), + total_rows, + elapsed.as_secs() + ); + std::io::stdout().flush().ok(); +} + +impl PruneReporter for Progress { + fn start_analyze(&mut self) { + print!("Analyze tables"); + self.analyze_start = Instant::now(); + } + + fn start_analyze_table(&mut self, table: &str) { + print!("\rAnalyze {table:48} "); + std::io::stdout().flush().ok(); + } + + fn finish_analyze(&mut self, stats: &[graph::components::store::VersionStats]) { + println!( + "\rAnalyzed {} tables in {}s", + stats.len(), + self.analyze_start.elapsed().as_secs() + ); + show_stats(stats, HashSet::new()).ok(); + println!(""); + } + + fn copy_final_start(&mut self, earliest_block: BlockNumber, final_block: BlockNumber) { + println!("Copy final entities 
(versions live between {earliest_block} and {final_block})"); + print_copy_header(); + + self.final_start = Instant::now(); + self.table_start = self.final_start; + } + + fn copy_final_batch(&mut self, table: &str, _rows: usize, total_rows: usize, finished: bool) { + print_copy_row(table, total_rows, self.table_start.elapsed()); + if finished { + println!(""); + self.table_start = Instant::now(); + } + std::io::stdout().flush().ok(); + } + + fn copy_final_finish(&mut self) { + println!( + "Finished copying final entity versions in {}s\n", + self.final_start.elapsed().as_secs() + ); + } + + fn start_switch(&mut self) { + println!("Blocking writes and switching tables"); + print_copy_header(); + self.switch_start = Instant::now(); + } + + fn finish_switch(&mut self) { + println!( + "Enabling writes. Switching took {}s\n", + self.switch_start.elapsed().as_secs() + ); + } + + fn copy_nonfinal_start(&mut self, table: &str) { + print_copy_row(table, 0, Duration::from_secs(0)); + self.nonfinal_start = Instant::now(); + } + + fn copy_nonfinal_batch( + &mut self, + table: &str, + _rows: usize, + total_rows: usize, + finished: bool, + ) { + print_copy_row(table, total_rows, self.table_start.elapsed()); + if finished { + println!(""); + self.table_start = Instant::now(); + } + std::io::stdout().flush().ok(); + } + + fn finish_prune(&mut self) { + println!("Finished pruning in {}s", self.start.elapsed().as_secs()); + } +} + +pub async fn run( + store: Arc, + primary_pool: ConnectionPool, + search: DeploymentSearch, + history: usize, + prune_ratio: f64, +) -> Result<(), anyhow::Error> { + let history = history as BlockNumber; + let deployment = search.locate_unique(&primary_pool)?; + let mut info = store + .status(status::Filter::DeploymentIds(vec![deployment.id]))? 
+ .pop() + .ok_or_else(|| anyhow!("deployment {deployment} not found"))?; + if info.chains.len() > 1 { + return Err(anyhow!( + "deployment {deployment} indexes {} chains, not sure how to deal with more than one chain", + info.chains.len() + )); + } + let status = info + .chains + .pop() + .ok_or_else(|| anyhow!("deployment {} does not index any chain", deployment))?; + let latest = status.latest_block.map(|ptr| ptr.number()).unwrap_or(0); + if latest <= history { + return Err(anyhow!("deployment {deployment} has only indexed up to block {latest} and we can't preserve {history} blocks of history")); + } + + println!("prune {deployment}"); + println!(" latest: {latest}"); + println!(" final: {}", latest - ETH_ENV.reorg_threshold); + println!(" earliest: {}\n", latest - history); + + let reporter = Box::new(Progress::new()); + store + .subgraph_store() + .prune( + reporter, + &deployment, + latest - history, + // Using the setting for eth chains is a bit lazy; the value + // should really depend on the chain, but we don't have a + // convenient way to figure out how each chain deals with + // finality + ETH_ENV.reorg_threshold, + prune_ratio, + ) + .await?; + + Ok(()) +} diff --git a/node/src/manager/commands/query.rs b/node/src/manager/commands/query.rs index a57ca149b61..262968eb022 100644 --- a/node/src/manager/commands/query.rs +++ b/node/src/manager/commands/query.rs @@ -58,9 +58,14 @@ pub async fn run( let query = Query::new( document, Some(QueryVariables::new(HashMap::from_iter(vars))), + true, ); let res = runner.run_query(query, target).await; + if let Some(err) = res.errors().first().cloned() { + return Err(err.into()); + } + if let Some(output) = output { let mut f = File::create(output)?; let json = serde_json::to_string(&res)?; diff --git a/node/src/manager/commands/remove.rs b/node/src/manager/commands/remove.rs index 36d0b614a6d..e89c3642215 100644 --- a/node/src/manager/commands/remove.rs +++ b/node/src/manager/commands/remove.rs @@ -3,9 +3,8 @@ use std::sync::Arc; use graph::prelude::{anyhow, Error, SubgraphName, SubgraphStore as _}; use graph_store_postgres::SubgraphStore; -pub fn run(store: Arc, name: String) -> Result<(), Error> { - let name = SubgraphName::new(name.clone()) - .map_err(|()| anyhow!("illegal subgraph name `{}`", name))?; +pub fn run(store: Arc, name: &str) -> Result<(), Error> { + let name = SubgraphName::new(name).map_err(|()| anyhow!("illegal subgraph name `{}`", name))?; println!("Removing subgraph {}", name); store.remove_subgraph(name)?; diff --git a/node/src/manager/commands/run.rs b/node/src/manager/commands/run.rs index df0db21325d..181978aac16 100644 --- a/node/src/manager/commands/run.rs +++ b/node/src/manager/commands/run.rs @@ -2,36 +2,33 @@ use std::collections::HashMap; use std::sync::Arc; use std::time::Duration; -use crate::chain::create_firehose_networks; -use crate::config::{Config, ProviderDetails}; +use crate::chain::{ + connect_ethereum_networks, create_ethereum_networks_for_chain, create_firehose_networks, + create_ipfs_clients, +}; +use crate::config::Config; use crate::manager::PanicSubscriptionManager; use crate::store_builder::StoreBuilder; use crate::MetricsContext; -use ethereum::chain::{EthereumAdapterSelector, EthereumStreamBuilder}; -use ethereum::{EthereumNetworks, ProviderEthRpcMetrics, RuntimeAdapter as EthereumRuntimeAdapter}; -use futures::future::join_all; -use futures::TryFutureExt; -use graph::anyhow::{bail, format_err, Error}; -use graph::blockchain::{BlockchainKind, BlockchainMap, ChainIdentifier}; +use 
ethereum::chain::{EthereumAdapterSelector, EthereumBlockRefetcher, EthereumStreamBuilder}; +use ethereum::{ProviderEthRpcMetrics, RuntimeAdapter as EthereumRuntimeAdapter}; +use graph::anyhow::{bail, format_err}; +use graph::blockchain::{BlockchainKind, BlockchainMap}; use graph::cheap_clone::CheapClone; use graph::components::store::{BlockStore as _, DeploymentLocator}; use graph::env::EnvVars; use graph::firehose::FirehoseEndpoints; -use graph::ipfs_client::IpfsClient; use graph::prelude::{ - anyhow, tokio, BlockNumber, DeploymentHash, LoggerFactory, - MetricsRegistry as MetricsRegistryTrait, NodeId, SubgraphAssignmentProvider, SubgraphName, - SubgraphRegistrar, SubgraphStore, SubgraphVersionSwitchingMode, ENV_VARS, + anyhow, tokio, BlockNumber, DeploymentHash, LoggerFactory, NodeId, SubgraphAssignmentProvider, + SubgraphName, SubgraphRegistrar, SubgraphStore, SubgraphVersionSwitchingMode, ENV_VARS, }; -use graph::slog::{debug, error, info, o, Logger}; -use graph::util::security::SafeDisplay; -use graph_chain_ethereum::{self as ethereum, EthereumAdapterTrait, Transport}; -use graph_core::polling_monitor::ipfs_service::IpfsService; +use graph::slog::{debug, info, Logger}; +use graph_chain_ethereum as ethereum; +use graph_core::polling_monitor::ipfs_service; use graph_core::{ LinkResolver, SubgraphAssignmentProvider as IpfsSubgraphAssignmentProvider, SubgraphInstanceManager, SubgraphRegistrar as IpfsSubgraphRegistrar, }; -use url::Url; fn locate(store: &dyn SubgraphStore, hash: &str) -> Result { let mut locators = store.locators(&hash)?; @@ -58,34 +55,29 @@ pub async fn run( subgraph, stop_block ); + let env_vars = Arc::new(EnvVars::from_env().unwrap()); let metrics_registry = metrics_ctx.registry.clone(); - let logger_factory = LoggerFactory::new(logger.clone(), None); + let logger_factory = LoggerFactory::new(logger.clone(), None, metrics_ctx.registry.clone()); // FIXME: Hard-coded IPFS config, take it from config file instead? let ipfs_clients: Vec<_> = create_ipfs_clients(&logger, &ipfs_url); let ipfs_client = ipfs_clients.first().cloned().expect("Missing IPFS client"); - let ipfs_service = IpfsService::new( + let ipfs_service = ipfs_service( ipfs_client, - ENV_VARS.mappings.max_ipfs_file_bytes as u64, - ENV_VARS.mappings.ipfs_timeout, - ENV_VARS.mappings.max_ipfs_concurrent_requests, + env_vars.mappings.max_ipfs_file_bytes as u64, + env_vars.mappings.ipfs_timeout, + env_vars.mappings.ipfs_request_limit, ); // Convert the clients into a link resolver. 
Since we want to get past // possible temporary DNS failures, make the resolver retry - let link_resolver = Arc::new(LinkResolver::new( - ipfs_clients, - Arc::new(EnvVars::default()), - )); + let link_resolver = Arc::new(LinkResolver::new(ipfs_clients, env_vars.cheap_clone())); - let eth_networks = create_ethereum_networks( - logger.clone(), - metrics_registry.clone(), - &config, - &network_name, - ) - .await - .expect("Failed to parse Ethereum networks"); + let eth_rpc_metrics = Arc::new(ProviderEthRpcMetrics::new(metrics_registry.clone())); + let eth_networks = + create_ethereum_networks_for_chain(&logger, eth_rpc_metrics, &config, &network_name) + .await + .expect("Failed to parse Ethereum networks"); let firehose_networks_by_kind = create_firehose_networks(logger.clone(), &config); let firehose_networks = firehose_networks_by_kind.get(&BlockchainKind::Ethereum); let firehose_endpoints = firehose_networks @@ -133,6 +125,7 @@ pub async fn run( eth_adapters.clone(), chain_head_update_listener, Arc::new(EthereumStreamBuilder {}), + Arc::new(EthereumBlockRefetcher {}), Arc::new(EthereumAdapterSelector::new( logger_factory.clone(), Arc::new(eth_adapters), @@ -157,6 +150,7 @@ pub async fn run( let blockchain_map = Arc::new(blockchain_map); let subgraph_instance_manager = SubgraphInstanceManager::new( &logger_factory, + env_vars.cheap_clone(), subgraph_store.clone(), blockchain_map.clone(), metrics_registry.clone(), @@ -264,226 +258,3 @@ pub async fn run( Ok(()) } - -// Stuff copied directly moslty from `main.rs` -// -// FIXME: Share that with `main.rs` stuff - -// The status of a provider that we learned from connecting to it -#[derive(PartialEq)] -enum ProviderNetworkStatus { - Broken { - network: String, - provider: String, - }, - Version { - network: String, - ident: ChainIdentifier, - }, -} - -/// How long we will hold up node startup to get the net version and genesis -/// hash from the client. If we can't get it within that time, we'll try and -/// continue regardless. 
-const NET_VERSION_WAIT_TIME: Duration = Duration::from_secs(30); - -fn create_ipfs_clients(logger: &Logger, ipfs_addresses: &Vec) -> Vec { - // Parse the IPFS URL from the `--ipfs` command line argument - let ipfs_addresses: Vec<_> = ipfs_addresses - .iter() - .map(|uri| { - if uri.starts_with("http://") || uri.starts_with("https://") { - String::from(uri) - } else { - format!("http://{}", uri) - } - }) - .collect(); - - ipfs_addresses - .into_iter() - .map(|ipfs_address| { - info!( - logger, - "Trying IPFS node at: {}", - SafeDisplay(&ipfs_address) - ); - - let ipfs_client = match IpfsClient::new(&ipfs_address) { - Ok(ipfs_client) => ipfs_client, - Err(e) => { - error!( - logger, - "Failed to create IPFS client for `{}`: {}", - SafeDisplay(&ipfs_address), - e - ); - panic!("Could not connect to IPFS"); - } - }; - - // Test the IPFS client by getting the version from the IPFS daemon - let ipfs_test = ipfs_client.cheap_clone(); - let ipfs_ok_logger = logger.clone(); - let ipfs_err_logger = logger.clone(); - let ipfs_address_for_ok = ipfs_address.clone(); - let ipfs_address_for_err = ipfs_address.clone(); - graph::spawn(async move { - ipfs_test - .test() - .map_err(move |e| { - error!( - ipfs_err_logger, - "Is there an IPFS node running at \"{}\"?", - SafeDisplay(ipfs_address_for_err), - ); - panic!("Failed to connect to IPFS: {}", e); - }) - .map_ok(move |_| { - info!( - ipfs_ok_logger, - "Successfully connected to IPFS node at: {}", - SafeDisplay(ipfs_address_for_ok) - ); - }) - .await - }); - - ipfs_client - }) - .collect() -} - -/// Parses an Ethereum connection string and returns the network name and Ethereum adapter. -pub async fn create_ethereum_networks( - logger: Logger, - registry: Arc, - config: &Config, - network_name: &str, -) -> Result { - let eth_rpc_metrics = Arc::new(ProviderEthRpcMetrics::new(registry)); - let mut parsed_networks = EthereumNetworks::new(); - let chain = config - .chains - .chains - .get(network_name) - .ok_or_else(|| anyhow!("unknown network {}", network_name))?; - if chain.protocol == BlockchainKind::Ethereum { - for provider in &chain.providers { - if let ProviderDetails::Web3(web3) = &provider.details { - let capabilities = web3.node_capabilities(); - - let logger = logger.new(o!("provider" => provider.label.clone())); - info!( - logger, - "Creating transport"; - "url" => &web3.url, - "capabilities" => capabilities - ); - - use crate::config::Transport::*; - - let transport = match web3.transport { - Rpc => Transport::new_rpc(Url::parse(&web3.url)?, web3.headers.clone()), - Ipc => Transport::new_ipc(&web3.url).await, - Ws => Transport::new_ws(&web3.url).await, - }; - - let supports_eip_1898 = !web3.features.contains("no_eip1898"); - - parsed_networks.insert( - network_name.to_string(), - capabilities, - Arc::new( - graph_chain_ethereum::EthereumAdapter::new( - logger, - provider.label.clone(), - &web3.url, - transport, - eth_rpc_metrics.clone(), - supports_eip_1898, - ) - .await, - ), - web3.limit_for(&config.node), - ); - } - } - } - parsed_networks.sort(); - Ok(parsed_networks) -} - -/// Try to connect to all the providers in `eth_networks` and get their net -/// version and genesis block. Return the same `eth_networks` and the -/// retrieved net identifiers grouped by network name. Remove all providers -/// for which trying to connect resulted in an error from the returned -/// `EthereumNetworks`, since it's likely pointless to try and connect to -/// them. 
If the connection attempt to a provider times out after -/// `NET_VERSION_WAIT_TIME`, keep the provider, but don't report a -/// version for it. -async fn connect_ethereum_networks( - logger: &Logger, - mut eth_networks: EthereumNetworks, -) -> (EthereumNetworks, Vec<(String, Vec)>) { - // This has one entry for each provider, and therefore multiple entries - // for each network - let statuses = join_all( - eth_networks - .flatten() - .into_iter() - .map(|(network_name, capabilities, eth_adapter)| { - (network_name, capabilities, eth_adapter, logger.clone()) - }) - .map(|(network, capabilities, eth_adapter, logger)| async move { - let logger = logger.new(o!("provider" => eth_adapter.provider().to_string())); - info!( - logger, "Connecting to Ethereum to get network identifier"; - "capabilities" => &capabilities - ); - match tokio::time::timeout(NET_VERSION_WAIT_TIME, eth_adapter.net_identifiers()) - .await - .map_err(Error::from) - { - // An `Err` means a timeout, an `Ok(Err)` means some other error (maybe a typo - // on the URL) - Ok(Err(e)) | Err(e) => { - error!(logger, "Connection to provider failed. Not using this provider"; - "error" => e.to_string()); - ProviderNetworkStatus::Broken { - network, - provider: eth_adapter.provider().to_string(), - } - } - Ok(Ok(ident)) => { - info!( - logger, - "Connected to Ethereum"; - "network_version" => &ident.net_version, - "capabilities" => &capabilities - ); - ProviderNetworkStatus::Version { network, ident } - } - } - }), - ) - .await; - - // Group identifiers by network name - let idents: HashMap> = - statuses - .into_iter() - .fold(HashMap::new(), |mut networks, status| { - match status { - ProviderNetworkStatus::Broken { network, provider } => { - eth_networks.remove(&network, &provider) - } - ProviderNetworkStatus::Version { network, ident } => { - networks.entry(network.to_string()).or_default().push(ident) - } - } - networks - }); - let idents: Vec<_> = idents.into_iter().collect(); - (eth_networks, idents) -} diff --git a/node/src/manager/commands/stats.rs b/node/src/manager/commands/stats.rs index be8799b98b3..1b4e3b5902e 100644 --- a/node/src/manager/commands/stats.rs +++ b/node/src/manager/commands/stats.rs @@ -1,15 +1,14 @@ use std::collections::HashMap; +use std::collections::HashSet; use std::sync::Arc; use crate::manager::deployment::DeploymentSearch; use diesel::r2d2::ConnectionManager; use diesel::r2d2::PooledConnection; -use diesel::sql_query; -use diesel::sql_types::{Integer, Text}; use diesel::PgConnection; -use diesel::RunQueryDsl; +use graph::components::store::DeploymentLocator; +use graph::components::store::VersionStats; use graph::prelude::anyhow; -use graph::prelude::anyhow::bail; use graph_store_postgres::command_support::catalog as store_catalog; use graph_store_postgres::command_support::catalog::Site; use graph_store_postgres::connection_pool::ConnectionPool; @@ -52,104 +51,151 @@ pub async fn account_like( Ok(()) } -pub fn show( - pools: HashMap, - search: &DeploymentSearch, - table: Option, -) -> Result<(), anyhow::Error> { - let (site, conn) = site_and_conn(pools, search)?; +pub fn abbreviate_table_name(table: &str, size: usize) -> String { + if table.len() > size { + let fragment = size / 2 - 2; + let last = table.len() - fragment; + let mut table = table.to_string(); + table.replace_range(fragment..last, ".."); + let table = table.trim().to_string(); + table + } else { + table.to_string() + } +} - #[derive(Queryable, QueryableByName)] - struct VersionStats { - #[sql_type = "Integer"] - entities: i32, - 
#[sql_type = "Integer"] - versions: i32, - #[sql_type = "Text"] - tablename: String, +pub fn show_stats( + stats: &[VersionStats], + account_like: HashSet, +) -> Result<(), anyhow::Error> { + fn header() { + println!( + "{:^30} | {:^10} | {:^10} | {:^7}", + "table", "entities", "versions", "ratio" + ); + println!("{:-^30}-+-{:-^10}-+-{:-^10}-+-{:-^7}", "", "", "", ""); } - impl VersionStats { - fn header() { - println!( - "{:^30} | {:^10} | {:^10} | {:^7}", - "table", "entities", "versions", "ratio" - ); - println!("{:-^30}-+-{:-^10}-+-{:-^10}-+-{:-^7}", "", "", "", ""); - } + fn footer() { + println!(" (a): account-like flag set"); + } - fn print(&self, account_like: bool) { - println!( - "{:<26} {:3} | {:>10} | {:>10} | {:>5.1}%", - self.tablename, - if account_like { "(a)" } else { " " }, - self.entities, - self.versions, - self.entities as f32 * 100.0 / self.versions as f32 - ); - } + fn print_stats(s: &VersionStats, account_like: bool) { + println!( + "{:<26} {:3} | {:>10} | {:>10} | {:>5.1}%", + abbreviate_table_name(&s.tablename, 26), + if account_like { "(a)" } else { " " }, + s.entities, + s.versions, + s.ratio * 100.0 + ); + } - fn footer() { - println!(" (a): account-like flag set"); - } + header(); + for s in stats { + print_stats(s, account_like.contains(&s.tablename)); + } + if !account_like.is_empty() { + footer(); } - let query = format!( - "select s.n_distinct::int4 as entities, - c.reltuples::int4 as versions, - c.relname as tablename - from pg_namespace n, pg_class c, pg_stats s - where n.nspname = $1 - and c.relnamespace = n.oid - and s.schemaname = n.nspname - and s.attname = 'id' - and c.relname = s.tablename - order by c.relname" - ); - let stats = sql_query(query) - .bind::(&site.namespace.as_str()) - .load::(&conn)?; + Ok(()) +} - let account_like = store_catalog::account_like(&conn, &site)?; +pub fn show( + pools: HashMap, + search: &DeploymentSearch, +) -> Result<(), anyhow::Error> { + let (site, conn) = site_and_conn(pools, search)?; - VersionStats::header(); - for stat in &stats { - stat.print(account_like.contains(&stat.tablename)); - } - VersionStats::footer(); - - if let Some(table) = table { - if !stats.iter().any(|stat| stat.tablename == table) { - bail!( - "deployment {} does not have a table `{}`", - site.namespace, - table - ); - } + let stats = store_catalog::stats(&conn, &site.namespace)?; - println!("doing a full count on {}.{} ...", site.namespace, table); - let query = format!( - "select count(distinct id)::int4 as entities, - count(*)::int4 as versions, - '{table}' as tablename - from {nsp}.{table}", - nsp = &site.namespace, - table = table - ); - let stat = sql_query(query).get_result::(&conn)?; - stat.print(account_like.contains(&stat.tablename)); - } + let account_like = store_catalog::account_like(&conn, &site)?; - Ok(()) + show_stats(stats.as_slice(), account_like) } pub fn analyze( store: Arc, pool: ConnectionPool, search: DeploymentSearch, - entity_name: &str, + entity_name: Option<&str>, ) -> Result<(), anyhow::Error> { let locator = search.locate_unique(&pool)?; - println!("Analyzing table sgd{}.{entity_name}", locator.id); + analyze_loc(store, &locator, entity_name) +} + +fn analyze_loc( + store: Arc, + locator: &DeploymentLocator, + entity_name: Option<&str>, +) -> Result<(), anyhow::Error> { + match entity_name { + Some(entity_name) => println!("Analyzing table sgd{}.{entity_name}", locator.id), + None => println!("Analyzing all tables for sgd{}", locator.id), + } store.analyze(&locator, entity_name).map_err(|e| anyhow!(e)) } + +pub fn 
target( + store: Arc, + primary: ConnectionPool, + search: &DeploymentSearch, +) -> Result<(), anyhow::Error> { + let locator = search.locate_unique(&primary)?; + let (default, targets) = store.stats_targets(&locator)?; + + let has_targets = targets + .values() + .any(|cols| cols.values().any(|target| *target > 0)); + + if has_targets { + println!( + "{:^74}", + format!( + "Statistics targets for sgd{} (default: {default})", + locator.id + ) + ); + println!("{:^30} | {:^30} | {:^8}", "table", "column", "target"); + println!("{:-^30}-+-{:-^30}-+-{:-^8}", "", "", ""); + for (table, columns) in targets { + for (column, target) in columns { + if target > 0 { + println!("{:<30} | {:<30} | {:>8}", table, column, target); + } + } + } + } else { + println!( + "no statistics targets set for sgd{}, global default is {default}", + locator.id + ); + } + Ok(()) +} + +pub fn set_target( + store: Arc, + primary: ConnectionPool, + search: &DeploymentSearch, + entity: Option<&str>, + columns: Vec, + target: i32, + no_analyze: bool, +) -> Result<(), anyhow::Error> { + let columns = if columns.is_empty() { + vec!["id".to_string(), "block_range".to_string()] + } else { + columns + }; + + let locator = search.locate_unique(&primary)?; + + store.set_stats_target(&locator, entity, columns, target)?; + + if !no_analyze { + analyze_loc(store, &locator, entity)?; + } + Ok(()) +} diff --git a/node/src/manager/commands/unused_deployments.rs b/node/src/manager/commands/unused_deployments.rs index 632b2053e4e..7351d32d8c8 100644 --- a/node/src/manager/commands/unused_deployments.rs +++ b/node/src/manager/commands/unused_deployments.rs @@ -76,7 +76,7 @@ pub fn record(store: Arc) -> Result<(), Error> { pub fn remove( store: Arc, count: usize, - deployment: Option, + deployment: Option<&str>, older: Option, ) -> Result<(), Error> { let filter = match older { @@ -88,7 +88,7 @@ pub fn remove( None => unused, Some(deployment) => unused .into_iter() - .filter(|u| u.deployment.as_str() == deployment) + .filter(|u| &u.deployment == deployment) .collect::>(), }; diff --git a/node/src/manager/deployment.rs b/node/src/manager/deployment.rs index db17db10f24..5693c050f4c 100644 --- a/node/src/manager/deployment.rs +++ b/node/src/manager/deployment.rs @@ -4,16 +4,12 @@ use std::str::FromStr; use diesel::{dsl::sql, prelude::*}; use diesel::{sql_types::Text, PgConnection}; -use regex::Regex; use graph::components::store::DeploymentId; use graph::{ components::store::DeploymentLocator, data::subgraph::status, - prelude::{ - anyhow::{self}, - lazy_static, DeploymentHash, - }, + prelude::{anyhow, lazy_static, regex::Regex, DeploymentHash}, }; use graph_store_postgres::command_support::catalog as store_catalog; use graph_store_postgres::connection_pool::ConnectionPool; @@ -123,6 +119,32 @@ impl DeploymentSearch { Ok(deployments) } + /// Finds all [`Deployment`]s for this [`DeploymentSearch`]. 
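The `find` method that follows absorbs the status filter formerly inlined in `info.rs`. Its truth table, restated as a self-contained sketch with a simplified `Deployment`:

```rust
struct Deployment {
    status: &'static str,
}

// `used` implies both flags; with neither flag set, everything is listed.
fn filter(
    deployments: Vec<Deployment>,
    current: bool,
    pending: bool,
    used: bool,
) -> Vec<Deployment> {
    let current = current || used;
    let pending = pending || used;
    deployments
        .into_iter()
        .filter(|d| match (current, pending) {
            (true, false) => d.status == "current",
            (false, true) => d.status == "pending",
            (true, true) => d.status == "current" || d.status == "pending",
            (false, false) => true,
        })
        .collect()
}

fn main() {
    let all = vec![
        Deployment { status: "current" },
        Deployment { status: "pending" },
    ];
    // With `used` set, both current and pending deployments survive.
    assert_eq!(filter(all, false, false, true).len(), 2);
}
```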
+ pub fn find( + &self, + pool: ConnectionPool, + current: bool, + pending: bool, + used: bool, + ) -> Result, anyhow::Error> { + let current = current || used; + let pending = pending || used; + + let deployments = self.lookup(&pool)?; + // Filter by status; if neither `current` or `pending` are set, list + // all deployments + let deployments: Vec<_> = deployments + .into_iter() + .filter(|deployment| match (current, pending) { + (true, false) => deployment.status == "current", + (false, true) => deployment.status == "pending", + (true, true) => deployment.status == "current" || deployment.status == "pending", + (false, false) => true, + }) + .collect(); + Ok(deployments) + } + /// Finds a single deployment locator for the given deployment identifier. pub fn locate_unique(&self, pool: &ConnectionPool) -> anyhow::Result { let mut locators: Vec = HashSet::::from_iter( diff --git a/node/src/manager/mod.rs b/node/src/manager/mod.rs index 37bebf9b72c..b2eccaf6e9a 100644 --- a/node/src/manager/mod.rs +++ b/node/src/manager/mod.rs @@ -2,13 +2,15 @@ use std::collections::BTreeSet; use graph::{ components::store::{SubscriptionManager, UnitStream}, - prelude::{StoreEventStreamBox, SubscriptionFilter}, + prelude::{anyhow, StoreEventStreamBox, SubscriptionFilter}, }; pub mod catalog; +pub mod color; pub mod commands; pub mod deployment; mod display; +pub mod prompt; /// A dummy subscription manager that always panics pub struct PanicSubscriptionManager; @@ -22,3 +24,5 @@ impl SubscriptionManager for PanicSubscriptionManager { panic!("we were never meant to call `subscribe_no_payload`"); } } + +pub type CmdResult = Result<(), anyhow::Error>; diff --git a/node/src/manager/prompt.rs b/node/src/manager/prompt.rs new file mode 100644 index 00000000000..be35cce4821 --- /dev/null +++ b/node/src/manager/prompt.rs @@ -0,0 +1,17 @@ +use graph::anyhow; +use std::io::{self, Write}; + +/// Asks users if they are certain about a certain action. +pub fn prompt_for_confirmation(prompt: &str) -> anyhow::Result { + print!("{prompt} [y/N] "); + io::stdout().flush()?; + + let mut answer = String::new(); + io::stdin().read_line(&mut answer)?; + answer.make_ascii_lowercase(); + + match answer.trim() { + "y" | "yes" => Ok(true), + _ => Ok(false), + } +} diff --git a/node/src/opt.rs b/node/src/opt.rs index 785b73f517e..321cf47b0cd 100644 --- a/node/src/opt.rs +++ b/node/src/opt.rs @@ -1,6 +1,6 @@ +use clap::Parser; use git_testament::{git_testament, render_testament}; use lazy_static::lazy_static; -use structopt::StructOpt; use crate::config; @@ -9,15 +9,15 @@ lazy_static! 
{ static ref RENDERED_TESTAMENT: String = render_testament!(TESTAMENT); } -#[derive(Clone, Debug, StructOpt)] -#[structopt( +#[derive(Clone, Debug, Parser)] +#[clap( name = "graph-node", about = "Scalable queries for a decentralized future", author = "Graph Protocol, Inc.", version = RENDERED_TESTAMENT.as_str() )] pub struct Opt { - #[structopt( + #[clap( long, env = "GRAPH_NODE_CONFIG", conflicts_with_all = &["postgres-url", "postgres-secondary-hosts", "postgres-host-weights"], @@ -25,9 +25,9 @@ pub struct Opt { help = "the name of the configuration file", )] pub config: Option, - #[structopt(long, help = "validate the configuration and exit")] + #[clap(long, help = "validate the configuration and exit")] pub check_config: bool, - #[structopt( + #[clap( long, value_name = "[NAME:]IPFS_HASH", env = "SUBGRAPH", @@ -35,14 +35,15 @@ pub struct Opt { )] pub subgraph: Option, - #[structopt( + #[clap( long, + env = "GRAPH_START_BLOCK", value_name = "BLOCK_HASH:BLOCK_NUMBER", help = "block hash and number that the subgraph passed will start indexing at" )] pub start_block: Option, - #[structopt( + #[clap( long, value_name = "URL", env = "POSTGRES_URL", @@ -51,7 +52,7 @@ pub struct Opt { help = "Location of the Postgres database used for storing entities" )] pub postgres_url: Option, - #[structopt( + #[clap( long, value_name = "URL,", use_delimiter = true, @@ -62,7 +63,7 @@ pub struct Opt { )] // FIXME: Make sure delimiter is ',' pub postgres_secondary_hosts: Vec, - #[structopt( + #[clap( long, value_name = "WEIGHT,", use_delimiter = true, @@ -74,7 +75,7 @@ pub struct Opt { Defaults to weight 1 for each host" )] pub postgres_host_weights: Vec, - #[structopt( + #[clap( long, min_values=0, required_unless_one = &["ethereum-ws", "ethereum-ipc", "config"], @@ -84,7 +85,7 @@ pub struct Opt { help= "Ethereum network name (e.g. 'mainnet'), optional comma-seperated capabilities (eg 'full,archive'), and an Ethereum RPC URL, separated by a ':'", )] pub ethereum_rpc: Vec, - #[structopt(long, min_values=0, + #[clap(long, min_values=0, required_unless_one = &["ethereum-rpc", "ethereum-ipc", "config"], conflicts_with_all = &["ethereum-rpc", "ethereum-ipc", "config"], value_name="NETWORK_NAME:[CAPABILITIES]:URL", @@ -92,7 +93,7 @@ pub struct Opt { help= "Ethereum network name (e.g. 'mainnet'), optional comma-seperated capabilities (eg 'full,archive`, and an Ethereum WebSocket URL, separated by a ':'", )] pub ethereum_ws: Vec, - #[structopt(long, min_values=0, + #[clap(long, min_values=0, required_unless_one = &["ethereum-rpc", "ethereum-ws", "config"], conflicts_with_all = &["ethereum-rpc", "ethereum-ws", "config"], value_name="NETWORK_NAME:[CAPABILITIES]:FILE", @@ -100,14 +101,14 @@ pub struct Opt { help= "Ethereum network name (e.g. 
'mainnet'), optional comma-seperated capabilities (eg 'full,archive'), and an Ethereum IPC pipe, separated by a ':'", )] pub ethereum_ipc: Vec, - #[structopt( + #[clap( long, value_name = "HOST:PORT", env = "IPFS", help = "HTTP addresses of IPFS nodes" )] pub ipfs: Vec, - #[structopt( + #[clap( long, default_value = "8000", value_name = "PORT", @@ -115,14 +116,14 @@ pub struct Opt { env = "GRAPH_GRAPHQL_HTTP_PORT" )] pub http_port: u16, - #[structopt( + #[clap( long, default_value = "8030", value_name = "PORT", help = "Port for the index node server" )] pub index_node_port: u16, - #[structopt( + #[clap( long, default_value = "8001", value_name = "PORT", @@ -130,21 +131,21 @@ pub struct Opt { env = "GRAPH_GRAPHQL_WS_PORT" )] pub ws_port: u16, - #[structopt( + #[clap( long, default_value = "8020", value_name = "PORT", help = "Port for the JSON-RPC admin server" )] pub admin_port: u16, - #[structopt( + #[clap( long, default_value = "8040", value_name = "PORT", help = "Port for the Prometheus metrics server" )] pub metrics_port: u16, - #[structopt( + #[clap( long, default_value = "default", value_name = "NODE_ID", @@ -152,7 +153,7 @@ pub struct Opt { help = "a unique identifier for this node. Should have the same value between consecutive node restarts" )] pub node_id: String, - #[structopt( + #[clap( long, value_name = "FILE", env = "GRAPH_NODE_EXPENSIVE_QUERIES_FILE", @@ -160,24 +161,24 @@ pub struct Opt { help = "a file with a list of expensive queries, one query per line. Attempts to run these queries will return a QueryExecutionError::TooExpensive to clients" )] pub expensive_queries_filename: String, - #[structopt(long, help = "Enable debug logging")] + #[clap(long, help = "Enable debug logging")] pub debug: bool, - #[structopt( + #[clap( long, value_name = "URL", env = "ELASTICSEARCH_URL", help = "Elasticsearch service to write subgraph logs to" )] pub elasticsearch_url: Option, - #[structopt( + #[clap( long, value_name = "USER", env = "ELASTICSEARCH_USER", help = "User to use for Elasticsearch logging" )] pub elasticsearch_user: Option, - #[structopt( + #[clap( long, value_name = "PASSWORD", env = "ELASTICSEARCH_PASSWORD", @@ -185,7 +186,7 @@ pub struct Opt { help = "Password to use for Elasticsearch logging" )] pub elasticsearch_password: Option, - #[structopt( + #[clap( long, value_name = "MILLISECONDS", default_value = "1000", @@ -193,14 +194,14 @@ pub struct Opt { help = "How often to poll the Ethereum node for new blocks" )] pub ethereum_polling_interval: u64, - #[structopt( + #[clap( long, value_name = "DISABLE_BLOCK_INGESTOR", env = "DISABLE_BLOCK_INGESTOR", help = "Ensures that the block ingestor component does not execute" )] pub disable_block_ingestor: bool, - #[structopt( + #[clap( long, value_name = "STORE_CONNECTION_POOL_SIZE", default_value = "10", @@ -208,20 +209,26 @@ pub struct Opt { help = "Limits the number of connections in the store's connection pool" )] pub store_connection_pool_size: u32, - #[structopt( + #[clap( long, help = "Allows setting configurations that may result in incorrect Proofs of Indexing." 
)] pub unsafe_config: bool, - #[structopt( + #[clap( long, value_name = "IPFS_HASH", + env = "GRAPH_DEBUG_FORK", help = "IPFS hash of the subgraph manifest that you want to fork" )] pub debug_fork: Option, - #[structopt(long, value_name = "URL", help = "Base URL for forking subgraphs")] + #[clap( + long, + value_name = "URL", + env = "GRAPH_FORK_BASE", + help = "Base URL for forking subgraphs" + )] pub fork_base: Option, } diff --git a/node/src/store_builder.rs b/node/src/store_builder.rs index 205f182d06f..7a7b139c21b 100644 --- a/node/src/store_builder.rs +++ b/node/src/store_builder.rs @@ -9,7 +9,9 @@ use graph::{ prelude::{info, CheapClone, Logger}, util::security::SafeDisplay, }; -use graph_store_postgres::connection_pool::{ConnectionPool, ForeignServer, PoolName}; +use graph_store_postgres::connection_pool::{ + ConnectionPool, ForeignServer, PoolCoordinator, PoolName, +}; use graph_store_postgres::{ BlockStore as DieselBlockStore, ChainHeadUpdateListener as PostgresChainHeadUpdateListener, NotificationSender, Shard as ShardName, Store as DieselStore, SubgraphStore, @@ -26,6 +28,7 @@ pub struct StoreBuilder { chain_head_update_listener: Arc, /// Map network names to the shards where they are/should be stored chains: HashMap, + pub coord: Arc, } impl StoreBuilder { @@ -47,7 +50,7 @@ impl StoreBuilder { registry.clone(), )); - let (store, pools) = Self::make_subgraph_store_and_pools( + let (store, pools, coord) = Self::make_subgraph_store_and_pools( logger, node, config, @@ -80,6 +83,7 @@ impl StoreBuilder { subscription_manager, chain_head_update_listener, chains, + coord, } } @@ -92,7 +96,11 @@ impl StoreBuilder { config: &Config, fork_base: Option, registry: Arc, - ) -> (Arc, HashMap) { + ) -> ( + Arc, + HashMap, + Arc, + ) { let notification_sender = Arc::new(NotificationSender::new(registry.cheap_clone())); let servers = config @@ -102,6 +110,7 @@ impl StoreBuilder { .collect::, _>>() .expect("connection url's contain enough detail"); let servers = Arc::new(servers); + let coord = Arc::new(PoolCoordinator::new(servers)); let shards: Vec<_> = config .stores @@ -114,7 +123,7 @@ impl StoreBuilder { name, shard, registry.cheap_clone(), - servers.clone(), + coord.clone(), ); let (read_only_conn_pools, weights) = Self::replica_pools( @@ -123,7 +132,7 @@ impl StoreBuilder { name, shard, registry.cheap_clone(), - servers.clone(), + coord.clone(), ); let name = @@ -147,7 +156,7 @@ impl StoreBuilder { registry, )); - (store, pools) + (store, pools, coord) } pub fn make_store( @@ -191,7 +200,7 @@ impl StoreBuilder { name: &str, shard: &Shard, registry: Arc, - servers: Arc>, + coord: Arc, ) -> ConnectionPool { let logger = logger.new(o!("pool" => "main")); let pool_size = shard.pool_size.size_for(node, name).expect(&format!( @@ -209,15 +218,14 @@ impl StoreBuilder { "conn_pool_size" => pool_size, "weight" => shard.weight ); - ConnectionPool::create( + coord.create_pool( + &logger, name, PoolName::Main, shard.connection.to_owned(), pool_size, Some(fdw_pool_size), - &logger, registry.cheap_clone(), - servers, ) } @@ -228,7 +236,7 @@ impl StoreBuilder { name: &str, shard: &Shard, registry: Arc, - servers: Arc>, + coord: Arc, ) -> (Vec, Vec) { let mut weights: Vec<_> = vec![shard.weight]; ( @@ -250,15 +258,15 @@ impl StoreBuilder { "we can determine the pool size for replica {}", name )); - ConnectionPool::create( + + coord.clone().create_pool( + &logger, name, PoolName::Replica(pool), replica.connection.clone(), pool_size, None, - &logger, registry.cheap_clone(), - servers.clone(), ) }) 
.collect(), diff --git a/runtime/derive/Cargo.toml b/runtime/derive/Cargo.toml index 13013d7e01a..c78a5441897 100644 --- a/runtime/derive/Cargo.toml +++ b/runtime/derive/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "graph-runtime-derive" -version = "0.27.0" -edition = "2021" +version.workspace = true +edition.workspace = true [lib] proc-macro = true @@ -9,5 +9,5 @@ proc-macro = true [dependencies] syn = { version = "1.0.98", features = ["full"] } quote = "1.0" -proc-macro2 = "1.0.43" +proc-macro2 = "1.0.51" heck = "0.4" diff --git a/runtime/test/Cargo.toml b/runtime/test/Cargo.toml index cd85bbc4226..8e2a56725c2 100644 --- a/runtime/test/Cargo.toml +++ b/runtime/test/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "graph-runtime-test" -version = "0.27.0" -edition = "2021" +version.workspace = true +edition.workspace = true [dependencies] semver = "1.0" diff --git a/runtime/test/src/common.rs b/runtime/test/src/common.rs index 8b0d77b4aa5..50360e2a7d2 100644 --- a/runtime/test/src/common.rs +++ b/runtime/test/src/common.rs @@ -140,7 +140,7 @@ pub fn mock_data_source(path: &str, api_version: Version) -> DataSource { link: Link { link: "link".to_owned(), }, - runtime: Arc::new(runtime.clone()), + runtime: Arc::new(runtime), }, context: Default::default(), creation_block: None, diff --git a/runtime/test/src/test.rs b/runtime/test/src/test.rs index cad61b5c897..939daf0e074 100644 --- a/runtime/test/src/test.rs +++ b/runtime/test/src/test.rs @@ -423,10 +423,7 @@ fn make_thing(id: &str, value: &str) -> (String, EntityModification) { data.set("id", id); data.set("value", value); data.set("extra", USER_DATA); - let key = EntityKey { - entity_type: EntityType::new("Thing".to_string()), - entity_id: id.into(), - }; + let key = EntityKey::data("Thing".to_string(), id); ( format!("{{ \"id\": \"{}\", \"value\": \"{}\"}}", id, value), EntityModification::Insert { key, data }, diff --git a/runtime/wasm/Cargo.toml b/runtime/wasm/Cargo.toml index 32818eb8697..9393385a452 100644 --- a/runtime/wasm/Cargo.toml +++ b/runtime/wasm/Cargo.toml @@ -1,20 +1,20 @@ [package] name = "graph-runtime-wasm" -version = "0.27.0" -edition = "2021" +version.workspace = true +edition.workspace = true [dependencies] async-trait = "0.1.50" -atomic_refcell = "0.1.8" +atomic_refcell = "0.1.9" ethabi = "17.2" futures = "0.1.21" hex = "0.4.3" graph = { path = "../../graph" } bs58 = "0.4.0" graph-runtime-derive = { path = "../derive" } -semver = "1.0.12" +semver = "1.0.16" lazy_static = "1.4" -uuid = { version = "1.1.2", features = ["v4"] } +uuid = { version = "1.2.2", features = ["v4"] } strum = "0.21.0" strum_macros = "0.21.1" bytes = "1.0" diff --git a/runtime/wasm/src/asc_abi/class.rs b/runtime/wasm/src/asc_abi/class.rs index 77ec457f127..5298eee76cb 100644 --- a/runtime/wasm/src/asc_abi/class.rs +++ b/runtime/wasm/src/asc_abi/class.rs @@ -705,7 +705,7 @@ impl AscIndexId for AscResult>, bool> { } #[repr(C)] -#[derive(AscType)] +#[derive(AscType, Copy, Clone)] pub struct AscWrapped { pub inner: V, } @@ -721,11 +721,3 @@ impl AscIndexId for AscWrapped { impl AscIndexId for AscWrapped>> { const INDEX_ASC_TYPE_ID: IndexForAscTypeId = IndexForAscTypeId::WrappedJsonValue; } - -impl Copy for AscWrapped {} - -impl Clone for AscWrapped { - fn clone(&self) -> Self { - Self { inner: self.inner } - } -} diff --git a/runtime/wasm/src/asc_abi/v0_0_4.rs b/runtime/wasm/src/asc_abi/v0_0_4.rs index 92f14edf362..7be7a276f59 100644 --- a/runtime/wasm/src/asc_abi/v0_0_4.rs +++ b/runtime/wasm/src/asc_abi/v0_0_4.rs @@ -116,7 +116,7 @@ impl AscType for 
ArrayBuffer { DeterministicHostError::from(anyhow!("Attempted to read past end of array")) })?; Ok(ArrayBuffer { - byte_length: u32::from_asc_bytes(&byte_length, api_version)?, + byte_length: u32::from_asc_bytes(byte_length, api_version)?, padding: [0; 4], content: content.to_vec().into(), }) } diff --git a/runtime/wasm/src/host.rs b/runtime/wasm/src/host.rs index e6423a572bb..ad05a576653 100644 --- a/runtime/wasm/src/host.rs +++ b/runtime/wasm/src/host.rs @@ -254,6 +254,22 @@ impl RuntimeHostTrait for RuntimeHost { fn creation_block_number(&self) -> Option { self.data_source.creation_block() } + + /// Offchain data sources track done_at which is set once the + /// trigger has been processed. + fn done_at(&self) -> Option { + match self.data_source() { + DataSource::Onchain(_) => None, + DataSource::Offchain(ds) => ds.done_at(), + } + } + + fn set_done_at(&self, block: Option) { + match self.data_source() { + DataSource::Onchain(_) => {} + DataSource::Offchain(ds) => ds.set_done_at(block), + } + } } impl PartialEq for RuntimeHost { diff --git a/runtime/wasm/src/host_exports.rs b/runtime/wasm/src/host_exports.rs index 28ef3b8192c..8c64233c975 100644 --- a/runtime/wasm/src/host_exports.rs +++ b/runtime/wasm/src/host_exports.rs @@ -11,9 +11,11 @@ use web3::types::H160; use graph::blockchain::Blockchain; use graph::components::store::EnsLookup; use graph::components::store::{EntityKey, EntityType}; -use graph::components::subgraph::{CausalityRegion, ProofOfIndexingEvent, SharedProofOfIndexing}; +use graph::components::subgraph::{ + PoICausalityRegion, ProofOfIndexingEvent, SharedProofOfIndexing, +}; use graph::data::store; -use graph::data_source::{DataSource, DataSourceTemplate}; +use graph::data_source::{CausalityRegion, DataSource, DataSourceTemplate, EntityTypeAccess}; use graph::ensure; use graph::prelude::ethabi::param_type::Reader; use graph::prelude::ethabi::{decode, encode, Token}; @@ -59,13 +61,16 @@ pub struct HostExports { pub api_version: Version, data_source_name: String, data_source_address: Vec, - data_source_network: String, + subgraph_network: String, data_source_context: Arc>, + entity_type_access: EntityTypeAccess, + data_source_causality_region: CausalityRegion, + /// Some data sources have indeterminism or different notions of time. These + /// need to be each be stored separately to separate causality between them, + /// and merge the results later. Right now, this is just the ethereum + /// networks but will be expanded for ipfs and the availability chain. - causality_region: String, + poi_causality_region: String, templates: Arc>>, pub(crate) link_resolver: Arc, ens_lookup: Arc, @@ -75,7 +80,7 @@ impl HostExports { pub fn new( subgraph_id: DeploymentHash, data_source: &DataSource, - data_source_network: String, + subgraph_network: String, templates: Arc>>, link_resolver: Arc, ens_lookup: Arc, @@ -86,14 +91,31 @@ impl HostExports { data_source_name: data_source.name().to_owned(), data_source_address: data_source.address().unwrap_or_default(), data_source_context: data_source.context().cheap_clone(), - causality_region: CausalityRegion::from_network(&data_source_network), - data_source_network, + entity_type_access: data_source.entities(), + data_source_causality_region: data_source.causality_region(), + poi_causality_region: PoICausalityRegion::from_network(&subgraph_network), + subgraph_network, templates, link_resolver, ens_lookup, } } + /// Enforce the entity type access restrictions.
See also: entity-type-access + fn check_entity_type_access(&self, entity_type: &EntityType) -> Result<(), HostExportError> { + match self.entity_type_access.allows(entity_type) { + true => Ok(()), + false => Err(HostExportError::Deterministic(anyhow!( + "entity type `{}` is not on the 'entities' list for data source `{}`. \ + Hint: Add `{}` to the 'entities' list, which currently is: `{}`.", + entity_type, + self.data_source_name, + entity_type, + self.entity_type_access + ))), + } + } + pub(crate) fn abort( &self, message: Option, @@ -136,7 +158,7 @@ impl HostExports { data: HashMap, stopwatch: &StopwatchMetrics, gas: &GasCounter, - ) -> Result<(), anyhow::Error> { + ) -> Result<(), HostExportError> { let poi_section = stopwatch.start_section("host_export_store_set__proof_of_indexing"); write_poi_event( proof_of_indexing, @@ -145,7 +167,7 @@ impl HostExports { id: &entity_id, data: &data, }, - &self.causality_region, + &self.poi_causality_region, logger, ); poi_section.end(); @@ -153,7 +175,9 @@ impl HostExports { let key = EntityKey { entity_type: EntityType::new(entity_type), entity_id: entity_id.into(), + causality_region: self.data_source_causality_region, }; + self.check_entity_type_access(&key.entity_type)?; gas.consume_host_fn(gas::STORE_SET.with_args(complexity::Linear, (&key, &data)))?; @@ -178,13 +202,15 @@ impl HostExports { entity_type: &entity_type, id: &entity_id, }, - &self.causality_region, + &self.poi_causality_region, logger, ); let key = EntityKey { entity_type: EntityType::new(entity_type), entity_id: entity_id.into(), + causality_region: self.data_source_causality_region, }; + self.check_entity_type_access(&key.entity_type)?; gas.consume_host_fn(gas::STORE_REMOVE.with_args(complexity::Size, &key))?; @@ -203,7 +229,9 @@ impl HostExports { let store_key = EntityKey { entity_type: EntityType::new(entity_type), entity_id: entity_id.into(), + causality_region: self.data_source_causality_region, }; + self.check_entity_type_access(&store_key.entity_type)?; let result = state.entity_cache.get(&store_key)?; gas.consume_host_fn(gas::STORE_GET.with_args(complexity::Linear, (&store_key, &result)))?; @@ -653,6 +681,10 @@ impl HostExports { Ok(self.ens_lookup.find_name(hash)?) } + pub(crate) fn is_ens_data_empty(&self) -> Result { + Ok(self.ens_lookup.is_table_empty()?) + } + pub(crate) fn log_log( &self, logger: &Logger, @@ -691,7 +723,7 @@ impl HostExports { gas: &GasCounter, ) -> Result { gas.consume_host_fn(Gas::new(gas::DEFAULT_BASE_COST))?; - Ok(self.data_source_network.clone()) + Ok(self.subgraph_network.clone()) } pub(crate) fn data_source_context( diff --git a/runtime/wasm/src/mapping.rs b/runtime/wasm/src/mapping.rs index cd1198bbbdf..f625de41957 100644 --- a/runtime/wasm/src/mapping.rs +++ b/runtime/wasm/src/mapping.rs @@ -99,7 +99,8 @@ where host_metrics.cheap_clone(), timeout, experimental_features, - )?; + ) + .context("module instantiation failed")?; section.end(); let _section = host_metrics.stopwatch.start_section("run_handler"); diff --git a/runtime/wasm/src/module/mod.rs b/runtime/wasm/src/module/mod.rs index 327b00e5e85..d7d473583a3 100644 --- a/runtime/wasm/src/module/mod.rs +++ b/runtime/wasm/src/module/mod.rs @@ -207,11 +207,15 @@ impl WasmInstance { .get_func(handler) .with_context(|| format!("function {} not found", handler))?; + let func = func + .typed() + .context("wasm function has incorrect signature")?; + // Caution: Make sure all exit paths from this function call `exit_handler`. 
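The `typed()` change above front-loads the signature check so a mismatch surfaces as a readable error rather than a trap at call time. A small sketch of the same `anyhow::Context` pattern, using an illustrative `get_func` stub rather than the wasmtime API:

```rust
use anyhow::{Context, Result};

// Illustrative stub: `None` models a wasm module that does not export
// the requested handler.
fn get_func(handler: &str) -> Option<&'static str> {
    (handler == "handleBlock").then_some("handleBlock")
}

fn prepare(handler: &str) -> Result<&'static str> {
    // `.with_context` attaches a readable message to the failure case,
    // so a missing (or, in the real code, mis-typed) handler fails up
    // front with an explanation instead of a bare trap later.
    get_func(handler).with_context(|| format!("function {} not found", handler))
}

fn main() {
    match prepare("handleTransfer") {
        Ok(f) => println!("calling {f}"),
        Err(e) => eprintln!("{e:#}"),
    }
}
```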
self.instance_ctx_mut().ctx.state.enter_handler(); // This `match` will return early if there was a non-deterministic trap. - let deterministic_error: Option = match func.typed()?.call(arg.wasm_ptr()) { + let deterministic_error: Option = match func.call(arg.wasm_ptr()) { Ok(()) => None, Err(trap) if self.instance_ctx().possible_reorg => { self.instance_ctx_mut().ctx.state.exit_handler(); @@ -249,7 +253,7 @@ impl WasmInstance { }; if let Some(deterministic_error) = deterministic_error { - let message = format!("{:#}", deterministic_error).replace("\n", "\t"); + let message = format!("{:#}", deterministic_error).replace('\n', "\t"); // Log the error and restore the updates snapshot, effectively reverting the handler. error!(&self.instance_ctx().ctx.logger, @@ -1685,6 +1689,12 @@ impl WasmInstanceContext { let hash: String = asc_get(self, hash_ptr, gas)?; let name = self.ctx.host_exports.ens_name_by_hash(&*hash)?; + if name.is_none() && self.ctx.host_exports.is_ens_data_empty()? { + return Err(anyhow!( + "Missing ENS data: see https://github.com/graphprotocol/ens-rainbow" + ) + .into()); + } // map `None` to `null`, and `Some(s)` to a runtime string name.map(|name| asc_new(self, &*name, gas).map_err(Into::into)) diff --git a/runtime/wasm/src/to_from/mod.rs b/runtime/wasm/src/to_from/mod.rs index 334f74c08fc..9cc800e5105 100644 --- a/runtime/wasm/src/to_from/mod.rs +++ b/runtime/wasm/src/to_from/mod.rs @@ -78,7 +78,7 @@ impl FromAscObj for String { // Strip null characters since they are not accepted by Postgres. if string.contains('\u{0000}') { - string = string.replace("\u{0000}", ""); + string = string.replace('\u{0000}', ""); } Ok(string) } diff --git a/rust-toolchain.toml b/rust-toolchain.toml new file mode 100644 index 00000000000..f0d51c34aef --- /dev/null +++ b/rust-toolchain.toml @@ -0,0 +1,3 @@ +[toolchain] +channel = "1.66.0" +profile = "default" diff --git a/scripts/README.md b/scripts/README.md index 0786d338c00..c749a79d70f 100644 --- a/scripts/README.md +++ b/scripts/README.md @@ -1,39 +1,19 @@ -# scripts +# Scripts -## update-cargo-version.sh +## `release.sh` -This script helps to update the versions of all `Cargo.toml` files and the `Cargo.lock` one. +1. Checks that all workspace crates use the same version via `version.workspace = true`. +2. Updates the version in the root `Cargo.toml` as indicated by the user: `major`, `minor`, or `patch`. +3. Updates `Cargo.lock` via `cargo check --tests`. +4. Adds the changes in a `Release vX.Y.Z` commit. +Upon failure, the script will print an error message and stop before committing the changes. ### Usage -The only argument it accepts is the type of version you want to do `(patch|minor|major)`. +The only argument it accepts is the type of release you want to do. ```bash -./scripts/update-cargo-version.sh patch +# E.g. you're on v0.28.0 and must release v0.28.1.
+$ ./scripts/release.sh patch ``` - -Example output: - -``` -Current version: "0.25.1" -New version: "0.25.2" -Changing 18 toml files -Toml files are still consistent in their version after the update -Updating Cargo.lock file - Finished dev [unoptimized + debuginfo] target(s) in 0.58s -Cargo.lock file updated -Updating version of the Cargo.{lock, toml} files succeded! -[otavio/update-news-0-25-2 2f2175bae] Release 0.25.2 - 20 files changed, 38 insertions(+), 38 deletions(-) -``` - -This script contains several assertions to make sure no mistake has been made. Unfortunately for now we don't have a way to revert it, or to recover from an error when it fails in the middle of it, this can be improved in the future. diff --git a/scripts/abort.sh b/scripts/abort.sh deleted file mode 100644 index ed47868ce34..00000000000 --- a/scripts/abort.sh +++ /dev/null @@ -1,6 +0,0 @@ -abort () { - local ERROR_MESSAGE=$1 - echo "Release failed, error message:" - echo $ERROR_MESSAGE - exit 1 -} diff --git a/scripts/lines-unique.sh b/scripts/lines-unique.sh deleted file mode 100755 index 186ea75e4ed..00000000000 --- a/scripts/lines-unique.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/bash - -source 'scripts/abort.sh' - -# Exits with code 0 if lines are unique, 1 otherwise -LINES=$@ - -# Validate that parameters are being sent -[ -z "$LINES" ] && abort "No lines received" - -if [ $(echo $LINES | tr " " "\n" | sort | uniq | wc -l) -eq 1 ]; then - exit 0 -else - exit 1 -fi diff --git a/scripts/release.sh b/scripts/release.sh new file mode 100755 index 00000000000..d86e6113e21 --- /dev/null +++ b/scripts/release.sh @@ -0,0 +1,111 @@ +#!/usr/bin/env bash +set -eo pipefail + +# TODO: Maybe we should revert all changes if the script fails halfway through? + +abort () { + local FAIL_MSG=$@ + echo "$FAIL_MSG" + exit 1 +} + +abort_failed_to_update () { + local FILE_NAME=$@ + abort "💀 Failed to update $FILE_NAME. Aborting." +} + +assert_all_cargo_tomls_inherit_version () { + ERROR=0 + # Get all files named Cargo.toml excluding the `integration-tests` folder and + # the root Cargo.toml. + CARGO_TOMLS=$( + find . -name Cargo.toml | \ + grep -v integration-tests | \ + grep -v '\./Cargo.toml' + ) + for CARGO_TOML in $CARGO_TOMLS + do + # Good files have a line that looks like `version.workspace = true`. Bad + # files don't. + VERSION_LINE=$(grep '^version' $CARGO_TOML) + if [[ $VERSION_LINE != "version.workspace = true" ]]; then + echo "⚠️ $CARGO_TOML does not inherit the crate version from the root workspace." + ERROR=1 + fi + done + + if [[ $ERROR == 1 ]]; then + echo "💀 All crates must inherit the workspace's crate version." + echo " " + abort " Aborting." + fi +} + +get_toml_version () { + echo $(grep '^version =' Cargo.toml | cut -d '"' -f2) +} + +main () { + CURRENT_VERSION=$(get_toml_version) + assert_all_cargo_tomls_inherit_version + + # Increment by CLI argument (major, minor, patch) + MAJOR=$(echo $CURRENT_VERSION | cut -d. -f1) + MINOR=$(echo $CURRENT_VERSION | cut -d. -f2) + PATCH=$(echo $CURRENT_VERSION | cut -d. -f3) + + case $1 in + "major") + let "++MAJOR" + MINOR=0 + PATCH=0 + ;; + "minor") + # Preincrement to avoid early exit with set -e: + # https://stackoverflow.com/questions/7247279/bash-set-e-and-i-0let-i-do-not-agree + let "++MINOR" + PATCH=0 + ;; + "patch") + let "++PATCH" + ;; + *) + abort "💀 Bad CLI usage! 
Version argument should be one of: major, minor or patch" + ;; + esac + + echo " - Current version: \"$CURRENT_VERSION\"" + NEW_VERSION="${MAJOR}.${MINOR}.${PATCH}" + echo " - New version: \"$NEW_VERSION\"" + + echo "⏳ Updating Cargo.toml..." + + # Works both on GNU and BSD sed (for macOS users) + # See: + # - https://unix.stackexchange.com/questions/401905/bsd-sed-vs-gnu-sed-and-i + # - https://stackoverflow.com/a/22084103/5148606 + sed -i.backup "s/^version = \"${CURRENT_VERSION}\"/version = \"${NEW_VERSION}\"/g" Cargo.toml + rm Cargo.toml.backup + + if [[ $(git diff Cargo.toml) ]]; then + echo "✅ Cargo.toml successfully updated." + else + abort_failed_to_update Cargo.toml + fi + + echo "⏳ Updating Cargo.lock..." + cargo check --tests + if [[ $(git diff Cargo.lock) ]]; then + echo "✅ Cargo.lock successfully updated." + else + abort_failed_to_update Cargo.lock + fi + + echo "⏳ Committing changes..." + git add Cargo.lock Cargo.toml + git commit -m "Release v${NEW_VERSION}" + + echo "🎉 Done!" +} + +main "$@" diff --git a/scripts/toml-utils.sh b/scripts/toml-utils.sh deleted file mode 100755 index 86872067b8e..00000000000 --- a/scripts/toml-utils.sh +++ /dev/null @@ -1,19 +0,0 @@ -# Get all files named 'Cargo.toml' in the `graph-node` directory, excluding the `integration-tests` folder. -get_all_toml_files () { - echo "$(find . -name Cargo.toml | grep -v integration-tests)" -} - -get_all_toml_versions () { - local FILE_NAMES=$@ - echo $( - echo $FILE_NAMES | \ - # Read all 'Cargo.toml' file contents. - xargs cat | \ - # Get the 'version' key of the TOML, eg: version = "0.25.2" - grep '^version = ' | \ - # Remove the '"' enclosing the version, eg: "0.25.2" - tr -d '"' | \ - # Get only the version number, eg: 0.25.2 - awk '{print $3}' \ - ) -} diff --git a/scripts/update-cargo-version.sh b/scripts/update-cargo-version.sh deleted file mode 100755 index 1471d0a9a81..00000000000 --- a/scripts/update-cargo-version.sh +++ /dev/null @@ -1,92 +0,0 @@ -#!/bin/bash - -source 'scripts/abort.sh' -source 'scripts/toml-utils.sh' - -ALL_TOML_FILE_NAMES=$(get_all_toml_files) -ALL_TOML_VERSIONS=$(get_all_toml_versions $ALL_TOML_FILE_NAMES) - -# Asserts all .toml versions are currently the same, otherwise abort. -if ./scripts/lines-unique.sh $ALL_TOML_VERSIONS; then - CURRENT_VERSION=$(echo $ALL_TOML_VERSIONS | awk '{print $1}') - echo "Current version: \"$CURRENT_VERSION\"" -else - abort "Some Cargo.toml files have different versions than others, make sure they're all the same before creating a new release" -fi - - -# Increment by CLI argument (major, minor, patch) -MAJOR=$(echo $CURRENT_VERSION | cut -d. -f1) -MINOR=$(echo $CURRENT_VERSION | cut -d. -f2) -PATCH=$(echo $CURRENT_VERSION | cut -d. -f3) - -case $1 in - "major") - let "MAJOR++" - MINOR=0 - PATCH=0 - ;; - "minor") - let "MINOR++" - PATCH=0 - ;; - "patch") - let "PATCH++" - ;; - *) - abort "Version argument should be one of: major, minor or patch" - ;; -esac - -NEW_VERSION="${MAJOR}.${MINOR}.${PATCH}" -echo "New version: \"$NEW_VERSION\"" - - -# Replace on all .toml files -echo "Changing $(echo $ALL_TOML_VERSIONS | tr " " "\n" | wc -l | awk '{$1=$1;print}') toml files" - -# MacOS/OSX unfortunately doesn't have the same API for `sed`, so we're -# using `perl` instead since it's installed by default in all Mac machines. 
-# -# For mor info: https://stackoverflow.com/questions/4247068/sed-command-with-i-option-failing-on-mac-but-works-on-linux -if [[ "$OSTYPE" == "darwin"* ]]; then - perl -i -pe"s/^version = \"${CURRENT_VERSION}\"/version = \"${NEW_VERSION}\"/" $ALL_TOML_FILE_NAMES -# Default, for decent OSs (eg: GNU-Linux) -else - sed -i "s/^version = \"${CURRENT_VERSION}\"/version = \"${NEW_VERSION}\"/" $ALL_TOML_FILE_NAMES -fi - - -# Assert all the new .toml versions are the same, otherwise abort -UPDATED_TOML_VERSIONS=$(get_all_toml_versions $ALL_TOML_FILE_NAMES) -if ./scripts/lines-unique.sh $UPDATED_TOML_VERSIONS; then - echo "Toml files are still consistent in their version after the update" -else - abort "Something went wrong with the version replacement and the new version isn't the same across the Cargo.toml files" -fi - - -# Assert there was a git diff in the changed files, otherwise abort -if [[ $(git diff $ALL_TOML_FILE_NAMES) ]]; then - : -else - abort "Somehow the toml files didn't get changed" -fi - - -echo "Updating Cargo.lock file" -cargo check --tests - - -# Assert .lock file changed the versions, otherwise abort -if [[ $(git diff Cargo.lock) ]]; then - echo "Cargo.lock file updated" -else - abort "There was no change in the Cargo.lock file, something went wrong with updating the crates versions" -fi - - -echo "Updating version of the Cargo.{lock, toml} files succeded!" - -git add Cargo.lock $ALL_TOML_FILE_NAMES -git commit -m "Release ${NEW_VERSION}" diff --git a/server/http/Cargo.toml b/server/http/Cargo.toml index 34387f4c3a6..41e2efb80c6 100644 --- a/server/http/Cargo.toml +++ b/server/http/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "graph-server-http" -version = "0.27.0" -edition = "2021" +version.workspace = true +edition.workspace = true [dependencies] futures = "0.1.21" diff --git a/server/http/src/request.rs b/server/http/src/request.rs index 05a7b083b62..8c659a7c199 100644 --- a/server/http/src/request.rs +++ b/server/http/src/request.rs @@ -4,7 +4,7 @@ use hyper::body::Bytes; use graph::components::server::query::GraphQLServerError; use graph::prelude::*; -pub fn parse_graphql_request(body: &Bytes) -> Result { +pub fn parse_graphql_request(body: &Bytes, trace: bool) -> Result { // Parse request body as JSON let json: serde_json::Value = serde_json::from_slice(body) .map_err(|e| GraphQLServerError::ClientError(format!("{}", e)))?; @@ -42,13 +42,12 @@ pub fn parse_graphql_request(body: &Bytes) -> Result )), }?; - Ok(Query::new(document, variables)) + Ok(Query::new(document, variables, trace)) } #[cfg(test)] mod tests { - use graphql_parser; - use hyper; + use std::collections::HashMap; use graph::{ @@ -67,33 +66,35 @@ mod tests { #[test] fn rejects_invalid_json() { - let request = parse_graphql_request(&hyper::body::Bytes::from("!@#)%")); + let request = parse_graphql_request(&hyper::body::Bytes::from("!@#)%"), false); request.expect_err("Should reject invalid JSON"); } #[test] fn rejects_json_without_query_field() { - let request = parse_graphql_request(&hyper::body::Bytes::from("{}")); + let request = parse_graphql_request(&hyper::body::Bytes::from("{}"), false); request.expect_err("Should reject JSON without query field"); } #[test] fn rejects_json_with_non_string_query_field() { - let request = parse_graphql_request(&hyper::body::Bytes::from("{\"query\": 5}")); + let request = parse_graphql_request(&hyper::body::Bytes::from("{\"query\": 5}"), false); request.expect_err("Should reject JSON with a non-string query field"); } #[test] fn rejects_broken_queries() { - let request 
= parse_graphql_request(&hyper::body::Bytes::from("{\"query\": \"foo\"}")); + let request = + parse_graphql_request(&hyper::body::Bytes::from("{\"query\": \"foo\"}"), false); request.expect_err("Should reject broken queries"); } #[test] fn accepts_valid_queries() { - let request = parse_graphql_request(&hyper::body::Bytes::from( - "{\"query\": \"{ user { name } }\"}", - )); + let request = parse_graphql_request( + &hyper::body::Bytes::from("{\"query\": \"{ user { name } }\"}"), + false, + ); let query = request.expect("Should accept valid queries"); assert_eq!( query.document, @@ -105,13 +106,16 @@ mod tests { #[test] fn accepts_null_variables() { - let request = parse_graphql_request(&hyper::body::Bytes::from( - "\ + let request = parse_graphql_request( + &hyper::body::Bytes::from( + "\ {\ \"query\": \"{ user { name } }\", \ \"variables\": null \ }", - )); + ), + false, + ); let query = request.expect("Should accept null variables"); let expected_query = graphql_parser::parse_query("{ user { name } }") @@ -123,27 +127,33 @@ mod tests { #[test] fn rejects_non_map_variables() { - let request = parse_graphql_request(&hyper::body::Bytes::from( - "\ + let request = parse_graphql_request( + &hyper::body::Bytes::from( + "\ {\ \"query\": \"{ user { name } }\", \ \"variables\": 5 \ }", - )); + ), + false, + ); request.expect_err("Should reject non-map variables"); } #[test] fn parses_variables() { - let request = parse_graphql_request(&hyper::body::Bytes::from( - "\ + let request = parse_graphql_request( + &hyper::body::Bytes::from( + "\ {\ \"query\": \"{ user { name } }\", \ \"variables\": { \ \"string\": \"s\", \"map\": {\"k\": \"v\"}, \"int\": 5 \ } \ }", - )); + ), + false, + ); let query = request.expect("Should accept valid queries"); let expected_query = graphql_parser::parse_query("{ user { name } }") diff --git a/server/http/src/server.rs b/server/http/src/server.rs index e605f798c2c..a99e8bafe05 100644 --- a/server/http/src/server.rs +++ b/server/http/src/server.rs @@ -11,13 +11,7 @@ use thiserror::Error; #[derive(Debug, Error)] pub enum GraphQLServeError { #[error("Bind error: {0}")] - BindError(hyper::Error), -} - -impl From for GraphQLServeError { - fn from(err: hyper::Error) -> Self { - GraphQLServeError::BindError(err) - } + BindError(#[from] hyper::Error), } /// A GraphQL server based on Hyper. 
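The `GraphQLServeError` hunk above swaps a hand-written `From<hyper::Error>` impl for `thiserror`'s `#[from]` attribute; the same refactor is applied to `IndexNodeServeError` and `PrometheusMetricsServeError` further down. A minimal standalone sketch of the pattern (the `ServeError` type and `try_bind` function are illustrative, not graph-node code):

```rust
use thiserror::Error;

#[derive(Debug, Error)]
pub enum ServeError {
    // `#[from]` derives `impl From<std::io::Error> for ServeError`,
    // replacing the manual `impl From` that the diff deletes.
    #[error("Bind error: {0}")]
    Bind(#[from] std::io::Error),
}

// `?` now converts the underlying error automatically via the derived impl.
fn try_bind(addr: &str) -> Result<std::net::TcpListener, ServeError> {
    Ok(std::net::TcpListener::bind(addr)?)
}

fn main() {
    match try_bind("127.0.0.1:0") {
        Ok(_) => println!("bound"),
        Err(e) => eprintln!("{}", e), // prints "Bind error: ..."
    }
}
```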
diff --git a/server/http/src/service.rs b/server/http/src/service.rs index cfc560445a7..5d0ef6ded14 100644 --- a/server/http/src/service.rs +++ b/server/http/src/service.rs @@ -97,13 +97,13 @@ where let mut version = ApiVersion::default(); if let Some(query) = request.uri().query() { - let potential_version_requirement = query.split("&").find_map(|pair| { + let potential_version_requirement = query.split('&').find_map(|pair| { if pair.starts_with("api-version=") { - if let Some(version_requirement) = pair.split("=").nth(1) { + if let Some(version_requirement) = pair.split('=').nth(1) { return Some(version_requirement); } } - return None; + None }); if let Some(version_requirement) = potential_version_requirement { @@ -111,7 +111,7 @@ where &VersionReq::parse(version_requirement) .map_err(|error| GraphQLServerError::ClientError(error.to_string()))?, ) - .map_err(|error| GraphQLServerError::ClientError(error))?; + .map_err(GraphQLServerError::ClientError)?; } } @@ -128,11 +128,8 @@ where GraphQLServerError::ClientError(format!("Invalid subgraph name {:?}", subgraph_name)) })?; - self.handle_graphql_query( - QueryTarget::Name(subgraph_name, version), - request.into_body(), - ) - .await + self.handle_graphql_query(QueryTarget::Name(subgraph_name, version), request) + .await } fn handle_graphql_query_by_id( @@ -150,7 +147,7 @@ where match res { Err(_) => self.handle_not_found(), Ok((id, version)) => self - .handle_graphql_query(QueryTarget::Deployment(id, version), request.into_body()) + .handle_graphql_query(QueryTarget::Deployment(id, version), request) .boxed(), } } @@ -158,15 +155,27 @@ where async fn handle_graphql_query( self, target: QueryTarget, - request_body: Body, + request: Request, ) -> GraphQLServiceResult { let service = self.clone(); let start = Instant::now(); - let body = hyper::body::to_bytes(request_body) + let trace = { + !ENV_VARS.graphql.query_trace_token.is_empty() + && request + .headers() + .get("X-GraphTraceQuery") + .map(|v| { + v.to_str() + .map(|s| s == &ENV_VARS.graphql.query_trace_token) + .unwrap_or(false) + }) + .unwrap_or(false) + }; + let body = hyper::body::to_bytes(request.into_body()) .map_err(|_| GraphQLServerError::InternalError("Failed to read request body".into())) .await?; - let query = parse_graphql_request(&body); + let query = parse_graphql_request(&body, trace); let query_parsing_time = start.elapsed(); let result = match query { @@ -254,8 +263,7 @@ where (Method::GET, path @ ["subgraphs", "id", _]) | (Method::GET, path @ ["subgraphs", "name", _]) | (Method::GET, path @ ["subgraphs", "name", _, _]) - | (Method::GET, path @ ["subgraphs", "network", _, _]) - | (Method::GET, path @ ["subgraphs"]) => { + | (Method::GET, path @ ["subgraphs", "network", _, _]) => { let dest = format!("/{}/graphql", path.join("/")); self.handle_temp_redirect(dest).boxed() } @@ -371,6 +379,7 @@ mod tests { fn observe_query_execution(&self, _duration: Duration, _results: &QueryResults) {} fn observe_query_parsing(&self, _duration: Duration, _results: &QueryResults) {} fn observe_query_validation(&self, _duration: Duration, _id: &DeploymentHash) {} + fn observe_query_validation_error(&self, _error_codes: Vec<&str>, _id: &DeploymentHash) {} } #[async_trait] diff --git a/server/http/tests/server.rs b/server/http/tests/server.rs index 856a4e9aac2..589e10d696b 100644 --- a/server/http/tests/server.rs +++ b/server/http/tests/server.rs @@ -20,6 +20,7 @@ impl GraphQLMetrics for TestGraphQLMetrics { fn observe_query_execution(&self, _duration: Duration, _results: &QueryResults) {} 
fn observe_query_parsing(&self, _duration: Duration, _results: &QueryResults) {} fn observe_query_validation(&self, _duration: Duration, _id: &DeploymentHash) {} + fn observe_query_validation_error(&self, _error_codes: Vec<&str>, _id: &DeploymentHash) {} } /// A simple stupid query runner for testing. @@ -88,6 +89,8 @@ impl GraphQlRunner for TestGraphQlRunner { #[cfg(test)] mod test { + use graph_mock::MockMetricsRegistry; + use super::*; lazy_static! { @@ -100,7 +103,7 @@ mod test { runtime .block_on(async { let logger = Logger::root(slog::Discard, o!()); - let logger_factory = LoggerFactory::new(logger, None); + let logger_factory = LoggerFactory::new(logger, None, Arc::new(MockMetricsRegistry::new())); let id = USERS.clone(); let query_runner = Arc::new(TestGraphQlRunner); let node_id = NodeId::new("test").unwrap(); @@ -141,7 +144,8 @@ mod test { let runtime = tokio::runtime::Runtime::new().unwrap(); runtime.block_on(async { let logger = Logger::root(slog::Discard, o!()); - let logger_factory = LoggerFactory::new(logger, None); + let logger_factory = + LoggerFactory::new(logger, None, Arc::new(MockMetricsRegistry::new())); let id = USERS.clone(); let query_runner = Arc::new(TestGraphQlRunner); let node_id = NodeId::new("test").unwrap(); @@ -221,7 +225,8 @@ mod test { let runtime = tokio::runtime::Runtime::new().unwrap(); runtime.block_on(async { let logger = Logger::root(slog::Discard, o!()); - let logger_factory = LoggerFactory::new(logger, None); + let logger_factory = + LoggerFactory::new(logger, None, Arc::new(MockMetricsRegistry::new())); let id = USERS.clone(); let query_runner = Arc::new(TestGraphQlRunner); let node_id = NodeId::new("test").unwrap(); @@ -266,7 +271,8 @@ mod test { let runtime = tokio::runtime::Runtime::new().unwrap(); let _ = runtime.block_on(async { let logger = Logger::root(slog::Discard, o!()); - let logger_factory = LoggerFactory::new(logger, None); + let logger_factory = + LoggerFactory::new(logger, None, Arc::new(MockMetricsRegistry::new())); let id = USERS.clone(); let query_runner = Arc::new(TestGraphQlRunner); let node_id = NodeId::new("test").unwrap(); diff --git a/server/index-node/Cargo.toml b/server/index-node/Cargo.toml index 15943699963..9088d7ff1ab 100644 --- a/server/index-node/Cargo.toml +++ b/server/index-node/Cargo.toml @@ -1,10 +1,10 @@ [package] name = "graph-server-index-node" -version = "0.27.0" -edition = "2021" +version.workspace = true +edition.workspace = true [dependencies] -blake3 = "1.0" +blake3 = "1.3" either = "1.8.0" futures = "0.3.4" graph = { path = "../../graph" } diff --git a/server/index-node/src/explorer.rs b/server/index-node/src/explorer.rs index 59fc5dbb930..ea1b0481513 100644 --- a/server/index-node/src/explorer.rs +++ b/server/index-node/src/explorer.rs @@ -96,8 +96,8 @@ where totalEthereumBlocksCount: total_ethereum_blocks_count, synced: vi.synced, failed: vi.failed, - description: vi.description.as_ref().map(|s| s.as_str()), - repository: vi.repository.as_ref().map(|s| s.as_str()), + description: vi.description.as_deref(), + repository: vi.repository.as_deref(), schema: vi.schema.document.to_string(), network: vi.network.as_str() }; @@ -110,7 +110,7 @@ where let value = object! 
{ createdAt: vi.created_at.as_str(), deploymentId: vi.deployment_id.as_str(), - repository: vi.repository.as_ref().map(|s| s.as_str()) + repository: vi.repository.as_deref() }; Ok(as_http_response(&value)) } diff --git a/server/index-node/src/lib.rs b/server/index-node/src/lib.rs index bbf1994dfcd..1db9861b1e9 100644 --- a/server/index-node/src/lib.rs +++ b/server/index-node/src/lib.rs @@ -8,3 +8,6 @@ mod service; pub use self::auth::PoiProtection; pub use self::server::IndexNodeServer; pub use self::service::{IndexNodeService, IndexNodeServiceResponse}; + +#[cfg(debug_assertions)] +pub use self::resolver::IndexNodeResolver; diff --git a/server/index-node/src/resolver.rs b/server/index-node/src/resolver.rs index 43aaf783e20..b80029d01b7 100644 --- a/server/index-node/src/resolver.rs +++ b/server/index-node/src/resolver.rs @@ -93,7 +93,7 @@ impl IndexNodeResolver { .argument_value("subgraphs") .map(|value| match value { r::Value::List(ids) => ids - .into_iter() + .iter() .map(|id| match id { r::Value::String(s) => s.clone(), _ => unreachable!(), @@ -101,7 +101,7 @@ impl IndexNodeResolver { .collect(), _ => unreachable!(), }) - .unwrap_or_else(|| Vec::new()); + .unwrap_or_else(Vec::new); let infos = self .store @@ -334,7 +334,7 @@ impl IndexNodeResolver { "block_hash" => format!("{}", block_hash), "error" => e.to_string(), ); - return Err(QueryExecutionError::StoreError(Error::from(e).into())); + return Err(QueryExecutionError::StoreError(e.into())); } }; @@ -347,7 +347,7 @@ impl IndexNodeResolver { block: object! { hash: cached_call.block_ptr.hash.hash_hex(), number: cached_call.block_ptr.number, - timestamp: timestamp.clone(), + timestamp: timestamp, }, contractAddress: &cached_call.contract_address[..], returnValue: &cached_call.return_value[..], @@ -451,10 +451,7 @@ impl IndexNodeResolver { Some((ref block, _)) => block.clone(), None => PartialBlockPtr::from(request.block_number), }, - proof_of_indexing: match poi_result { - Some((_, poi)) => Some(poi), - None => None, - }, + proof_of_indexing: poi_result.map(|(_, poi)| poi), }) .map(IntoValue::into_value) .collect(), diff --git a/server/index-node/src/server.rs b/server/index-node/src/server.rs index 2fdc6ddbb4d..7222b40e9ca 100644 --- a/server/index-node/src/server.rs +++ b/server/index-node/src/server.rs @@ -1,4 +1,3 @@ -use hyper; use hyper::service::make_service_fn; use hyper::Server; use std::net::{Ipv4Addr, SocketAddrV4}; @@ -16,13 +15,7 @@ use thiserror::Error; #[derive(Debug, Error)] pub enum IndexNodeServeError { #[error("Bind error: {0}")] - BindError(hyper::Error), -} - -impl From for IndexNodeServeError { - fn from(err: hyper::Error) -> Self { - IndexNodeServeError::BindError(err) - } + BindError(#[from] hyper::Error), } /// A GraphQL server based on Hyper. 
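The explorer and resolver changes above are mechanical Clippy-style cleanups that recur throughout this diff: `as_deref` instead of `as_ref().map(|s| s.as_str())`, `Option::map` instead of a `match` that merely forwards `Some`/`None`, and `unwrap_or_else(Vec::new)` instead of `unwrap_or_else(|| Vec::new())`. A minimal sketch of the three idioms, with made-up values:

```rust
fn main() {
    // Before: repository.as_ref().map(|s| s.as_str())
    let repository: Option<String> = Some("https://example.com/repo".to_string());
    let as_str: Option<&str> = repository.as_deref();
    assert_eq!(as_str, Some("https://example.com/repo"));

    // Before: match poi_result { Some((_, poi)) => Some(poi), None => None }
    let poi_result: Option<(u32, &str)> = Some((1, "poi"));
    assert_eq!(poi_result.map(|(_, poi)| poi), Some("poi"));

    // Before: .unwrap_or_else(|| Vec::new())
    let ids: Vec<String> = None.unwrap_or_else(Vec::new);
    assert!(ids.is_empty());
}
```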
@@ -90,8 +83,8 @@ where let service = IndexNodeService::new( logger_for_service.clone(), self.blockchain_map.clone(), - graphql_runner.clone(), - store.clone(), + graphql_runner, + store, self.link_resolver.clone(), ); let new_service = diff --git a/server/index-node/src/service.rs b/server/index-node/src/service.rs index 81ae88d113d..3f013449fa9 100644 --- a/server/index-node/src/service.rs +++ b/server/index-node/src/service.rs @@ -6,7 +6,7 @@ use http::header::{ use hyper::body::Bytes; use hyper::service::Service; use hyper::{Body, Method, Request, Response, StatusCode}; -use serde_json; + use std::task::Context; use std::task::Poll; @@ -14,7 +14,6 @@ use graph::components::{server::query::GraphQLServerError, store::Store}; use graph::data::query::QueryResults; use graph::prelude::*; use graph_graphql::prelude::{execute_query, Query as PreparedQuery, QueryExecutionOptions}; -use graphql_parser; use crate::auth::bearer_token; @@ -28,6 +27,7 @@ impl GraphQLMetrics for NoopGraphQLMetrics { fn observe_query_execution(&self, _duration: Duration, _results: &QueryResults) {} fn observe_query_parsing(&self, _duration: Duration, _results: &QueryResults) {} fn observe_query_validation(&self, _duration: Duration, _id: &DeploymentHash) {} + fn observe_query_validation_error(&self, _error_codes: Vec<&str>, _id: &DeploymentHash) {} } /// An asynchronous response to a GraphQL request. @@ -111,10 +111,10 @@ where Self::serve_file(Self::graphiql_html(), "text/html") } - async fn handle_graphql_query( + pub async fn handle_graphql_query( &self, request: Request, - ) -> Result, GraphQLServerError> { + ) -> Result { let (req_parts, req_body) = request.into_parts(); let store = self.store.clone(); @@ -138,7 +138,7 @@ where Arc::new(NoopGraphQLMetrics), ) { Ok(query) => query, - Err(e) => return Ok(QueryResults::from(QueryResult::from(e)).as_http_response()), + Err(e) => return Ok(QueryResults::from(QueryResult::from(e))), }; let load_manager = self.graphql_runner.load_manager(); @@ -146,7 +146,7 @@ where // Run the query using the index node resolver let query_clone = query.cheap_clone(); let logger = self.logger.cheap_clone(); - let result = { + let result: QueryResult = { let resolver = IndexNodeResolver::new( &logger, store, @@ -160,16 +160,15 @@ where max_first: std::u32::MAX, max_skip: std::u32::MAX, load_manager, + trace: false, }; let result = execute_query(query_clone.cheap_clone(), None, None, options).await; query_clone.log_execution(0); - QueryResult::from( - // Index status queries are not cacheable, so we may unwrap this. - Arc::try_unwrap(result).unwrap(), - ) + // Index status queries are not cacheable, so we may unwrap this. 
+ Arc::try_unwrap(result).unwrap() }; - Ok(QueryResults::from(result).as_http_response()) + Ok(QueryResults::from(result)) } // Handles OPTIONS requests @@ -241,7 +240,9 @@ } (Method::GET, ["graphql", "playground"]) => Ok(Self::handle_graphiql()), - (Method::POST, ["graphql"]) => self.handle_graphql_query(req).await, + (Method::POST, ["graphql"]) => { + Ok(self.handle_graphql_query(req).await?.as_http_response()) + } (Method::OPTIONS, ["graphql"]) => Ok(Self::handle_graphql_options(req)), (Method::GET, ["explorer", rest @ ..]) => self.explorer.handle(&self.logger, rest), @@ -355,7 +356,7 @@ impl ValidatedRequest { )), }?; - let query = Query::new(document, variables); + let query = Query::new(document, variables, false); let bearer_token = bearer_token(headers) .map(<[u8]>::to_vec) .map(String::from_utf8) @@ -374,7 +375,7 @@ impl ValidatedRequest { #[cfg(test)] mod tests { use graph::{data::value::Object, prelude::*}; - use graphql_parser; + use hyper::body::Bytes; use hyper::HeaderMap; use std::collections::HashMap; diff --git a/server/json-rpc/Cargo.toml b/server/json-rpc/Cargo.toml index 829a2b5ad47..6434dce33ac 100644 --- a/server/json-rpc/Cargo.toml +++ b/server/json-rpc/Cargo.toml @@ -1,10 +1,9 @@ [package] name = "graph-server-json-rpc" -version = "0.27.0" -edition = "2021" +version.workspace = true +edition.workspace = true [dependencies] graph = { path = "../../graph" } -jsonrpc-http-server = "18.0.0" -lazy_static = "1.2.0" +jsonrpsee = { version = "0.15.1", features = ["http-server"] } serde = "1.0" diff --git a/server/json-rpc/src/lib.rs b/server/json-rpc/src/lib.rs index d076a8d1c16..c720905345e 100644 --- a/server/json-rpc/src/lib.rs +++ b/server/json-rpc/src/lib.rs @@ -1,49 +1,80 @@ -extern crate graph; -extern crate jsonrpc_http_server; -extern crate lazy_static; -extern crate serde; - -use graph::prelude::serde_json; -use graph::prelude::{JsonRpcServer as JsonRpcServerTrait, *}; -use jsonrpc_http_server::{ - jsonrpc_core::{self, Compatibility, IoHandler, Params, Value}, - RestApi, Server, ServerBuilder, -}; +use graph::prelude::{Value as GraphValue, *}; +use jsonrpsee::core::Error as JsonRpcError; +use jsonrpsee::http_server::{HttpServerBuilder, HttpServerHandle}; +use jsonrpsee::types::error::CallError; +use jsonrpsee::types::ErrorObject; +use jsonrpsee::RpcModule; +use serde_json::{self, Value as JsonValue}; use std::collections::BTreeMap; -use std::io; -use std::net::{Ipv4Addr, SocketAddrV4}; +use std::net::{Ipv4Addr, SocketAddr}; -const JSON_RPC_DEPLOY_ERROR: i64 = 0; -const JSON_RPC_REMOVE_ERROR: i64 = 1; -const JSON_RPC_CREATE_ERROR: i64 = 2; -const JSON_RPC_REASSIGN_ERROR: i64 = 3; +type JsonRpcResult = Result; -#[derive(Debug, Deserialize)] -struct SubgraphCreateParams { - name: SubgraphName, +pub struct JsonRpcServer { + // TODO: in the future we might want to have some sort of async drop to stop + // the server. For now, we're just letting it run forever.
+ _handle: HttpServerHandle, } -#[derive(Debug, Deserialize)] -struct SubgraphDeployParams { - name: SubgraphName, - ipfs_hash: DeploymentHash, - node_id: Option, - debug_fork: Option, -} +impl JsonRpcServer { + pub async fn serve( + port: u16, + http_port: u16, + ws_port: u16, + registrar: Arc, + node_id: NodeId, + logger: Logger, + ) -> JsonRpcResult + where + R: SubgraphRegistrar, + { + let logger = logger.new(o!("component" => "JsonRpcServer")); -#[derive(Debug, Deserialize)] -struct SubgraphRemoveParams { - name: SubgraphName, -} + info!( + logger, + "Starting JSON-RPC admin server at: http://localhost:{}", port + ); -#[derive(Debug, Deserialize)] -struct SubgraphReassignParams { - ipfs_hash: DeploymentHash, - node_id: NodeId, + let state = ServerState { + registrar, + http_port, + ws_port, + node_id, + logger, + }; + + let socket_addr: SocketAddr = (Ipv4Addr::new(0, 0, 0, 0), port).into(); + let http_server = HttpServerBuilder::default().build(socket_addr).await?; + + let mut rpc_module = RpcModule::new(state); + rpc_module + .register_async_method("subgraph_create", |params, state| async move { + state.create_handler(params.parse()?).await + }) + .unwrap(); + rpc_module + .register_async_method("subgraph_deploy", |params, state| async move { + state.deploy_handler(params.parse()?).await + }) + .unwrap(); + rpc_module + .register_async_method("subgraph_remove", |params, state| async move { + state.remove_handler(params.parse()?).await + }) + .unwrap(); + rpc_module + .register_async_method("subgraph_reassign", |params, state| async move { + state.reassign_handler(params.parse()?).await + }) + .unwrap(); + + let _handle = http_server.start(rpc_module)?; + Ok(Self { _handle }) + } } -pub struct JsonRpcServer { +struct ServerState { registrar: Arc, http_port: u16, ws_port: u16, @@ -51,12 +82,14 @@ pub struct JsonRpcServer { logger: Logger, } -impl JsonRpcServer { +impl ServerState { + const DEPLOY_ERROR: i64 = 0; + const REMOVE_ERROR: i64 = 1; + const CREATE_ERROR: i64 = 2; + const REASSIGN_ERROR: i64 = 3; + /// Handler for the `subgraph_create` endpoint. - async fn create_handler( - &self, - params: SubgraphCreateParams, - ) -> Result { + async fn create_handler(&self, params: SubgraphCreateParams) -> JsonRpcResult { info!(&self.logger, "Received subgraph_create request"; "params" => format!("{:?}", params)); match self.registrar.create_subgraph(params.name.clone()).await { @@ -67,17 +100,14 @@ impl JsonRpcServer { &self.logger, "subgraph_create", e, - JSON_RPC_CREATE_ERROR, + Self::CREATE_ERROR, params, )), } } /// Handler for the `subgraph_deploy` endpoint. - async fn deploy_handler( - &self, - params: SubgraphDeployParams, - ) -> Result { + async fn deploy_handler(&self, params: SubgraphDeployParams) -> JsonRpcResult { info!(&self.logger, "Received subgraph_deploy request"; "params" => format!("{:?}", params)); let node_id = params.node_id.clone().unwrap_or(self.node_id.clone()); @@ -101,17 +131,14 @@ impl JsonRpcServer { &self.logger, "subgraph_deploy", e, - JSON_RPC_DEPLOY_ERROR, + Self::DEPLOY_ERROR, params, )), } } /// Handler for the `subgraph_remove` endpoint. 
- async fn remove_handler( - &self, - params: SubgraphRemoveParams, - ) -> Result { + async fn remove_handler(&self, params: SubgraphRemoveParams) -> JsonRpcResult { info!(&self.logger, "Received subgraph_remove request"; "params" => format!("{:?}", params)); match self.registrar.remove_subgraph(params.name.clone()).await { @@ -120,20 +147,15 @@ impl JsonRpcServer { &self.logger, "subgraph_remove", e, - JSON_RPC_REMOVE_ERROR, + Self::REMOVE_ERROR, params, )), } } /// Handler for the `subgraph_assign` endpoint. - async fn reassign_handler( - &self, - params: SubgraphReassignParams, - ) -> Result { - let logger = self.logger.clone(); - - info!(logger, "Received subgraph_reassignment request"; "params" => format!("{:?}", params)); + async fn reassign_handler(&self, params: SubgraphReassignParams) -> JsonRpcResult { + info!(&self.logger, "Received subgraph_reassignment request"; "params" => format!("{:?}", params)); match self .registrar @@ -142,100 +164,23 @@ impl JsonRpcServer { { Ok(_) => Ok(Value::Null), Err(e) => Err(json_rpc_error( - &logger, + &self.logger, "subgraph_reassign", e, - JSON_RPC_REASSIGN_ERROR, + Self::REASSIGN_ERROR, params, )), } } } -impl JsonRpcServerTrait for JsonRpcServer -where - R: SubgraphRegistrar, -{ - type Server = Server; - - fn serve( - port: u16, - http_port: u16, - ws_port: u16, - registrar: Arc, - node_id: NodeId, - logger: Logger, - ) -> Result { - let logger = logger.new(o!("component" => "JsonRpcServer")); - - info!( - logger, - "Starting JSON-RPC admin server at: http://localhost:{}", port - ); - - let addr = SocketAddrV4::new(Ipv4Addr::new(0, 0, 0, 0), port); - - let mut handler = IoHandler::with_compatibility(Compatibility::Both); - - let arc_self = Arc::new(JsonRpcServer { - registrar, - http_port, - ws_port, - node_id, - logger, - }); - - let me = arc_self.clone(); - handler.add_method("subgraph_create", move |params: Params| { - let me = me.clone(); - async move { - let params = params.parse()?; - me.create_handler(params).await - } - }); - - let me = arc_self.clone(); - handler.add_method("subgraph_deploy", move |params: Params| { - let me = me.clone(); - async move { - let params = params.parse()?; - me.deploy_handler(params).await - } - }); - - let me = arc_self.clone(); - handler.add_method("subgraph_remove", move |params: Params| { - let me = me.clone(); - async move { - let params = params.parse()?; - me.remove_handler(params).await - } - }); - - let me = arc_self; - handler.add_method("subgraph_reassign", move |params: Params| { - let me = me.clone(); - async move { - let params = params.parse()?; - me.reassign_handler(params).await - } - }); - - ServerBuilder::new(handler) - // Enable REST API: - // POST /// - .rest_api(RestApi::Secure) - .start_http(&addr.into()) - } -} - fn json_rpc_error( logger: &Logger, operation: &str, e: SubgraphRegistrarError, code: i64, params: impl std::fmt::Debug, -) -> jsonrpc_core::Error { +) -> JsonRpcError { error!(logger, "{} failed", operation; "error" => format!("{:?}", e), "params" => format!("{:?}", params)); @@ -246,26 +191,14 @@ fn json_rpc_error( e.to_string() }; - jsonrpc_core::Error { - code: jsonrpc_core::ErrorCode::ServerError(code), + JsonRpcError::Call(CallError::Custom(ErrorObject::owned( + code as _, message, - data: None, - } -} - -pub fn parse_response(response: Value) -> Result<(), jsonrpc_core::Error> { - // serde deserialization of the `id` field to an `Id` struct is somehow - // incompatible with the `arbitrary-precision` feature which we use, so we - // need custom parsing logic. 
- let object = response.as_object().unwrap(); - if let Some(error) = object.get("error") { - Err(serde_json::from_value(error.clone()).unwrap()) - } else { - Ok(()) - } + None::, + ))) } -fn subgraph_routes(name: &SubgraphName, http_port: u16, ws_port: u16) -> Value { +fn subgraph_routes(name: &SubgraphName, http_port: u16, ws_port: u16) -> JsonValue { let http_base_url = ENV_VARS .external_http_base_url .clone() @@ -288,5 +221,30 @@ fn subgraph_routes(name: &SubgraphName, http_port: u16, ws_port: u16) -> Value { "subscriptions", format!("{}/subgraphs/name/{}", ws_base_url, name), ); - jsonrpc_core::to_value(map).unwrap() + + serde_json::to_value(map).expect("invalid subgraph routes") +} + +#[derive(Debug, Deserialize)] +struct SubgraphCreateParams { + name: SubgraphName, +} + +#[derive(Debug, Deserialize)] +struct SubgraphDeployParams { + name: SubgraphName, + ipfs_hash: DeploymentHash, + node_id: Option, + debug_fork: Option, +} + +#[derive(Debug, Deserialize)] +struct SubgraphRemoveParams { + name: SubgraphName, +} + +#[derive(Debug, Deserialize)] +struct SubgraphReassignParams { + ipfs_hash: DeploymentHash, + node_id: NodeId, } diff --git a/server/metrics/Cargo.toml b/server/metrics/Cargo.toml index 8f47abf3d72..67f6092123f 100644 --- a/server/metrics/Cargo.toml +++ b/server/metrics/Cargo.toml @@ -1,11 +1,8 @@ [package] name = "graph-server-metrics" -version = "0.27.0" -edition = "2021" +version.workspace = true +edition.workspace = true [dependencies] graph = { path = "../../graph" } -http = "0.2" hyper = { version = "0.14", features = ["server"] } -lazy_static = "1.2.0" -serde = "1.0" diff --git a/server/metrics/src/lib.rs b/server/metrics/src/lib.rs index 0109b862810..f67d9dadf32 100644 --- a/server/metrics/src/lib.rs +++ b/server/metrics/src/lib.rs @@ -2,41 +2,27 @@ use std::net::{Ipv4Addr, SocketAddrV4}; use std::sync::Arc; use anyhow::Error; -use graph::prometheus::{Encoder, Registry, TextEncoder}; use hyper::header::{ACCESS_CONTROL_ALLOW_ORIGIN, CONTENT_TYPE}; use hyper::service::{make_service_fn, service_fn}; use hyper::{Body, Response, Server}; use thiserror::Error; -use graph::prelude::{MetricsServer as MetricsServerTrait, *}; +use graph::prelude::*; +use graph::prometheus::{Encoder, Registry, TextEncoder}; /// Errors that may occur when starting the server. #[derive(Debug, Error)] pub enum PrometheusMetricsServeError { #[error("Bind error: {0}")] - BindError(hyper::Error), -} - -impl From for PrometheusMetricsServeError { - fn from(err: hyper::Error) -> Self { - PrometheusMetricsServeError::BindError(err) - } + BindError(#[from] hyper::Error), } +#[derive(Clone)] pub struct PrometheusMetricsServer { logger: Logger, registry: Arc, } -impl Clone for PrometheusMetricsServer { - fn clone(&self) -> Self { - Self { - logger: self.logger.clone(), - registry: self.registry.clone(), - } - } -} - impl PrometheusMetricsServer { pub fn new(logger_factory: &LoggerFactory, registry: Arc) -> Self { PrometheusMetricsServer { @@ -44,15 +30,12 @@ impl PrometheusMetricsServer { registry, } } -} - -impl MetricsServerTrait for PrometheusMetricsServer { - type ServeError = PrometheusMetricsServeError; - fn serve( + /// Creates a new Tokio task that, when spawned, brings up the metrics server.
+ pub async fn serve( &mut self, port: u16, - ) -> Result + Send>, Self::ServeError> { + ) -> Result, PrometheusMetricsServeError> { let logger = self.logger.clone(); info!( @@ -87,6 +70,6 @@ impl MetricsServerTrait for PrometheusMetricsServer { .serve(new_service) .map_err(move |e| error!(logger, "Metrics server error"; "error" => format!("{}", e))); - Ok(Box::new(task.compat())) + Ok(task.await) } } diff --git a/server/websocket/Cargo.toml b/server/websocket/Cargo.toml index cf1f8e8d995..02466d2825b 100644 --- a/server/websocket/Cargo.toml +++ b/server/websocket/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "graph-server-websocket" -version = "0.27.0" -edition = "2021" +version.workspace = true +edition.workspace = true [dependencies] futures = "0.1.23" @@ -11,6 +11,6 @@ http = "0.2" lazy_static = "1.2.0" serde = "1.0" serde_derive = "1.0" -tokio-tungstenite = "0.14" -uuid = { version = "0.8.1", features = ["v4"] } +tokio-tungstenite = "0.17" +uuid = { version = "1.2.2", features = ["v4"] } anyhow = "1.0" diff --git a/server/websocket/src/connection.rs b/server/websocket/src/connection.rs index d3eae1c959a..ac7d885eab5 100644 --- a/server/websocket/src/connection.rs +++ b/server/websocket/src/connection.rs @@ -301,7 +301,7 @@ where let subscription = Subscription { // Subscriptions currently do not benefit from the generational cache // anyways, so don't bother passing a network. - query: Query::new(query, variables), + query: Query::new(query, variables, false), }; debug!(logger, "Start operation"; diff --git a/store/postgres/Cargo.toml b/store/postgres/Cargo.toml index 092b2cad2b5..59e9b1580fb 100644 --- a/store/postgres/Cargo.toml +++ b/store/postgres/Cargo.toml @@ -1,16 +1,16 @@ [package] name = "graph-store-postgres" -version = "0.27.0" -edition = "2021" +version.workspace = true +edition.workspace = true [dependencies] async-trait = "0.1.50" -blake3 = "1.0" +blake3 = "1.3" derive_more = { version = "0.99.17" } diesel = { version = "1.4.8", features = ["postgres", "serde_json", "numeric", "r2d2"] } # We use diesel-dynamic-schema straight from git as the project has not # made a release as a crate yet -diesel-dynamic-schema = { git = "https://github.com/diesel-rs/diesel-dynamic-schema", rev="a8ec4fb1" } +diesel-dynamic-schema = { git = "https://github.com/diesel-rs/diesel-dynamic-schema", rev = "a8ec4fb1" } diesel-derive-enum = { version = "1.1", features = ["postgres"] } diesel_migrations = "1.3.0" fallible-iterator = "0.2.0" @@ -21,22 +21,23 @@ lazy_static = "1.1" lru_time_cache = "0.11" maybe-owned = "0.3.4" postgres = "0.19.1" -openssl = "0.10.41" +openssl = "0.10.45" postgres-openssl = "0.5.0" rand = "0.8.4" serde = "1.0" -uuid = { version = "1.1.2", features = ["v4"] } +uuid = { version = "1.2.2", features = ["v4"] } stable-hash_legacy = { version = "0.3.3", package = "stable-hash" } diesel_derives = "1.4.1" -anyhow = "1.0.65" -git-testament = "0.2.0" -itertools = "0.10.3" +anyhow = "1.0.69" +git-testament = "0.2.4" +itertools = "0.10.5" pin-utils = "0.1" hex = "0.4.3" +pretty_assertions = "1.3.0" [dev-dependencies] futures = "0.3" -clap = "3.2.21" +clap = "3.2.23" graphql-parser = "0.4.0" test-store = { path = "../test-store" } hex-literal = "0.3" diff --git a/store/postgres/examples/layout.rs b/store/postgres/examples/layout.rs index 1f73934adda..1bf156d5f21 100644 --- a/store/postgres/examples/layout.rs +++ b/store/postgres/examples/layout.rs @@ -2,6 +2,7 @@ extern crate clap; extern crate graph_store_postgres; use clap::{arg, Command}; +use std::collections::BTreeSet; use 
std::process::exit; use std::{fs, sync::Arc}; @@ -145,7 +146,7 @@ pub fn main() { ); let site = Arc::new(make_dummy_site(subgraph, namespace, "anet".to_string())); let catalog = ensure( - Catalog::for_tests(site.clone()), + Catalog::for_tests(site.clone(), BTreeSet::new()), "Failed to construct catalog", ); let layout = ensure( diff --git a/store/postgres/migrations/2022-09-19-161239_add-raw-manifest-column/down.sql b/store/postgres/migrations/2022-09-19-161239_add-raw-manifest-column/down.sql new file mode 100644 index 00000000000..0a8ca06be1e --- /dev/null +++ b/store/postgres/migrations/2022-09-19-161239_add-raw-manifest-column/down.sql @@ -0,0 +1 @@ +alter table subgraphs.subgraph_manifest drop column raw_yaml; diff --git a/store/postgres/migrations/2022-09-19-161239_add-raw-manifest-column/up.sql b/store/postgres/migrations/2022-09-19-161239_add-raw-manifest-column/up.sql new file mode 100644 index 00000000000..c32f3e646c1 --- /dev/null +++ b/store/postgres/migrations/2022-09-19-161239_add-raw-manifest-column/up.sql @@ -0,0 +1 @@ +alter table subgraphs.subgraph_manifest add column raw_yaml text; diff --git a/store/postgres/migrations/2022-10-13-000000_track_processed_data_source/down.sql b/store/postgres/migrations/2022-10-13-000000_track_processed_data_source/down.sql new file mode 100644 index 00000000000..c9c81d36642 --- /dev/null +++ b/store/postgres/migrations/2022-10-13-000000_track_processed_data_source/down.sql @@ -0,0 +1 @@ +raise 'This migration is irreversible'; diff --git a/store/postgres/migrations/2022-10-13-000000_track_processed_data_source/up.sql b/store/postgres/migrations/2022-10-13-000000_track_processed_data_source/up.sql new file mode 100644 index 00000000000..8e91977c3bd --- /dev/null +++ b/store/postgres/migrations/2022-10-13-000000_track_processed_data_source/up.sql @@ -0,0 +1,18 @@ +-- add done_at column to data_sources$ table for each subgraph deployment +do $$ +declare + deployments cursor for + select t.table_schema as sgd + from information_schema.tables t + where t.table_schema like 'sgd%' + and t.table_name = 'data_sources$' + and not exists (select 1 from information_schema.columns c + where c.table_name = t.table_name + and c.table_schema = t.table_schema + and c.column_name = 'done_at'); +begin + for d in deployments loop + execute 'alter table ' || d.sgd || '.data_sources$ add done_at int'; + end loop; +end; +$$; \ No newline at end of file diff --git a/store/postgres/migrations/2022-11-03-213140_drop_earliest_block/down.sql b/store/postgres/migrations/2022-11-03-213140_drop_earliest_block/down.sql new file mode 100644 index 00000000000..adda576a9c0 --- /dev/null +++ b/store/postgres/migrations/2022-11-03-213140_drop_earliest_block/down.sql @@ -0,0 +1,9 @@ +alter table subgraphs.subgraph_deployment + add column earliest_ethereum_block_number numeric, + add column earliest_ethereum_block_hash bytea; + +update subgraphs.subgraph_deployment d + set earliest_ethereum_block_number = m.start_block_number, + earliest_ethereum_block_hash = m.start_block_hash + from subgraphs.subgraph_manifest m + where m.id = d.id; diff --git a/store/postgres/migrations/2022-11-03-213140_drop_earliest_block/up.sql b/store/postgres/migrations/2022-11-03-213140_drop_earliest_block/up.sql new file mode 100644 index 00000000000..083501264d3 --- /dev/null +++ b/store/postgres/migrations/2022-11-03-213140_drop_earliest_block/up.sql @@ -0,0 +1,3 @@ +alter table subgraphs.subgraph_deployment + drop column earliest_ethereum_block_number, + drop column earliest_ethereum_block_hash; 
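The migrations in this diff are ordinary paired Diesel `up.sql`/`down.sql` files, named with a timestamp prefix that fixes their order. A minimal sketch of how such migrations can be embedded and applied with the `diesel_migrations` 1.x crate pinned in `store/postgres/Cargo.toml` above (the connection URL is illustrative, and this is not the repo's actual startup code):

```rust
#[macro_use]
extern crate diesel_migrations;

use diesel::pg::PgConnection;
use diesel::prelude::*;

// Compile every migration directory (e.g.
// `2022-11-03-213140_drop_earliest_block/{up,down}.sql`) into the binary.
embed_migrations!("store/postgres/migrations");

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let conn = PgConnection::establish("postgres://postgres@localhost/graph_node_test")?;
    // Applies only the migrations not yet recorded as run, in timestamp order.
    embedded_migrations::run(&conn)?;
    Ok(())
}
```

Applied migrations are recorded in `__diesel_schema_migrations`, the same table that the `migration_count` helper added to `catalog.rs` below counts.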
diff --git a/store/postgres/migrations/2022-11-10-185105_add_has_causality_region_column/down.sql b/store/postgres/migrations/2022-11-10-185105_add_has_causality_region_column/down.sql new file mode 100644 index 00000000000..4775fa58a3b --- /dev/null +++ b/store/postgres/migrations/2022-11-10-185105_add_has_causality_region_column/down.sql @@ -0,0 +1 @@ +alter table subgraphs.subgraph_manifest drop column entities_with_causality_region; diff --git a/store/postgres/migrations/2022-11-10-185105_add_has_causality_region_column/up.sql b/store/postgres/migrations/2022-11-10-185105_add_has_causality_region_column/up.sql new file mode 100644 index 00000000000..d82d5ad2628 --- /dev/null +++ b/store/postgres/migrations/2022-11-10-185105_add_has_causality_region_column/up.sql @@ -0,0 +1 @@ +alter table subgraphs.subgraph_manifest add column if not exists entities_with_causality_region text[] not null default array[]::text[]; diff --git a/store/postgres/migrations/2023-01-24-192319_chain_size_view/down.sql b/store/postgres/migrations/2023-01-24-192319_chain_size_view/down.sql new file mode 100644 index 00000000000..027c1afb4f9 --- /dev/null +++ b/store/postgres/migrations/2023-01-24-192319_chain_size_view/down.sql @@ -0,0 +1,10 @@ +-- This file should undo anything in `up.sql` + +drop view if exists info.all_sizes; + +create view info.all_sizes as +select * from info.subgraph_sizes +union all +select * from info.table_sizes; + +drop materialized view if exists info.chain_sizes; diff --git a/store/postgres/migrations/2023-01-24-192319_chain_size_view/up.sql b/store/postgres/migrations/2023-01-24-192319_chain_size_view/up.sql new file mode 100644 index 00000000000..1d45be2359d --- /dev/null +++ b/store/postgres/migrations/2023-01-24-192319_chain_size_view/up.sql @@ -0,0 +1,34 @@ + +drop materialized view if exists info.chain_sizes; + +create materialized view info.chain_sizes as +select *, + pg_size_pretty(total_bytes) as total, + pg_size_pretty(index_bytes) as index, + pg_size_pretty(toast_bytes) as toast, + pg_size_pretty(table_bytes) as table + from ( + select *, + total_bytes-index_bytes-coalesce(toast_bytes,0) AS table_bytes + from ( + select nspname as table_schema, relname as table_name, + 'shared'::text as version, + c.reltuples as row_estimate, + pg_total_relation_size(c.oid) as total_bytes, + pg_indexes_size(c.oid) as index_bytes, + pg_total_relation_size(reltoastrelid) as toast_bytes + from pg_class c + join pg_namespace n on n.oid = c.relnamespace + where relkind = 'r' + and nspname like 'chain%' + ) a +) a with no data; + +drop view if exists info.all_sizes; + +create view info.all_sizes as +select * from info.subgraph_sizes +union all +select * from info.chain_sizes +union all +select * from info.table_sizes; diff --git a/store/postgres/src/advisory_lock.rs b/store/postgres/src/advisory_lock.rs index 6b98d51ddf9..674b96809b5 100644 --- a/store/postgres/src/advisory_lock.rs +++ b/store/postgres/src/advisory_lock.rs @@ -11,7 +11,10 @@ //! We use the following 2x 32-bit locks //! * 1, n: to lock copying of the deployment with id n in the destination //! shard +//! * 2, n: to lock the deployment with id n to make sure only one write +//! happens to it +use diesel::sql_types::Bool; use diesel::{sql_query, PgConnection, RunQueryDsl}; use graph::prelude::StoreError; @@ -44,3 +47,36 @@ pub(crate) fn unlock_copying(conn: &PgConnection, dst: &Site) -> Result<(), Stor .map(|_| ()) .map_err(StoreError::from) } + +/// Try to lock deployment `site` with a session lock. 
Return `true` if we +/// got the lock, and `false` if we did not. You don't want to use this +/// directly. Instead, use `deployment::with_lock` +pub(crate) fn lock_deployment_session( + conn: &PgConnection, + site: &Site, +) -> Result { + #[derive(QueryableByName)] + struct Locked { + #[sql_type = "Bool"] + locked: bool, + } + + sql_query(&format!( + "select pg_try_advisory_lock(2, {}) as locked", + site.id + )) + .get_result::(conn) + .map(|res| res.locked) + .map_err(StoreError::from) +} + +/// Release the lock acquired with `lock_deployment_session`. +pub(crate) fn unlock_deployment_session( + conn: &PgConnection, + site: &Site, +) -> Result<(), StoreError> { + sql_query(&format!("select pg_advisory_unlock(2, {})", site.id)) + .execute(conn) + .map(|_| ()) + .map_err(StoreError::from) +} diff --git a/store/postgres/src/block_range.rs b/store/postgres/src/block_range.rs index 64bb8902fc0..a8d85e4c21c 100644 --- a/store/postgres/src/block_range.rs +++ b/store/postgres/src/block_range.rs @@ -7,14 +7,17 @@ use diesel::sql_types::{Integer, Range}; use std::io::Write; use std::ops::{Bound, RangeBounds, RangeFrom}; -use graph::prelude::{BlockNumber, BlockPtr, BLOCK_NUMBER_MAX}; +use graph::prelude::{lazy_static, BlockNumber, BlockPtr, BLOCK_NUMBER_MAX}; -use crate::relational::Table; +use crate::relational::{SqlName, Table}; /// The name of the column in which we store the block range for mutable /// entities pub(crate) const BLOCK_RANGE_COLUMN: &str = "block_range"; +/// The name of the column that stores the causality region of an entity. +pub(crate) const CAUSALITY_REGION_COLUMN: &str = "causality_region"; + /// The SQL clause we use to check that an entity version is current; /// that version has an unbounded block range, but checking for /// `upper_inf(block_range)` is slow and can't use the exclusion @@ -39,6 +42,12 @@ pub(crate) const UNVERSIONED_RANGE: (Bound, Bound) = /// immutable entity is visible pub(crate) const BLOCK_COLUMN: &str = "block$"; +lazy_static! { + pub(crate) static ref BLOCK_RANGE_COLUMN_SQL: SqlName = + SqlName::verbatim(BLOCK_RANGE_COLUMN.to_string()); + pub(crate) static ref BLOCK_COLUMN_SQL: SqlName = SqlName::verbatim(BLOCK_COLUMN.to_string()); +} + /// The range of blocks for which an entity is valid. We need this struct /// to bind ranges into Diesel queries. 
#[derive(Clone, Debug)] diff --git a/store/postgres/src/block_store.rs b/store/postgres/src/block_store.rs index b31281abf56..79bc4db40f7 100644 --- a/store/postgres/src/block_store.rs +++ b/store/postgres/src/block_store.rs @@ -8,7 +8,7 @@ use std::{ use graph::{ blockchain::ChainIdentifier, components::store::BlockStore as BlockStoreTrait, - prelude::{error, warn, BlockNumber, BlockPtr, Logger}, + prelude::{error, warn, BlockNumber, BlockPtr, Logger, ENV_VARS}, }; use graph::{ constraint_violation, @@ -372,6 +372,7 @@ impl BlockStore { status, sender, pool, + ENV_VARS.store.recent_blocks_cache_capacity, ); if create { store.create(&ident)?; diff --git a/store/postgres/src/catalog.rs b/store/postgres/src/catalog.rs index 3a9de92bf66..6228ce66842 100644 --- a/store/postgres/src/catalog.rs +++ b/store/postgres/src/catalog.rs @@ -3,20 +3,27 @@ use diesel::{connection::SimpleConnection, prelude::RunQueryDsl, select}; use diesel::{insert_into, OptionalExtension}; use diesel::{pg::PgConnection, sql_query}; use diesel::{ - sql_types::{Array, Nullable, Text}, + sql_types::{Array, Double, Nullable, Text}, ExpressionMethods, QueryDsl, }; -use std::collections::{HashMap, HashSet}; +use graph::components::store::EntityType; +use graph::components::store::VersionStats; +use itertools::Itertools; +use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet}; use std::fmt::Write; use std::iter::FromIterator; use std::sync::Arc; +use std::time::Duration; use graph::prelude::anyhow::anyhow; -use graph::{data::subgraph::schema::POI_TABLE, prelude::StoreError}; +use graph::{ + data::subgraph::schema::POI_TABLE, + prelude::{lazy_static, StoreError}, +}; use crate::connection_pool::ForeignServer; use crate::{ - primary::{Namespace, Site}, + primary::{Namespace, Site, NAMESPACE_PUBLIC}, relational::SqlName, }; @@ -31,13 +38,62 @@ table! { } } -// Readonly; we only access the name +// Readonly; not all columns are mapped +table! { + pg_namespace(oid) { + oid -> Oid, + #[sql_name = "nspname"] + name -> Text, + } +} +// Readonly; only mapping the columns we want table! { - pg_namespace(nspname) { - nspname -> Text, + pg_database(datname) { + datname -> Text, + datcollate -> Text, + datctype -> Text, } } +// Readonly; not all columns are mapped +table! { + pg_class(oid) { + oid -> Oid, + #[sql_name = "relname"] + name -> Text, + #[sql_name = "relnamespace"] + namespace -> Oid, + #[sql_name = "relpages"] + pages -> Integer, + #[sql_name = "reltuples"] + tuples -> Integer, + #[sql_name = "relkind"] + kind -> Char, + #[sql_name = "relnatts"] + natts -> Smallint, + } +} + +// Readonly; not all columns are mapped +table! { + pg_attribute(oid) { + #[sql_name = "attrelid"] + oid -> Oid, + #[sql_name = "attrelid"] + relid -> Oid, + #[sql_name = "attname"] + name -> Text, + #[sql_name = "attnum"] + num -> Smallint, + #[sql_name = "attstattarget"] + stats_target -> Integer, + } +} + +joinable!(pg_class -> pg_namespace(namespace)); +joinable!(pg_attribute -> pg_class(relid)); +allow_tables_to_appear_in_same_query!(pg_class, pg_namespace, pg_attribute); + table! { subgraphs.table_stats { id -> Integer, @@ -47,23 +103,83 @@ table! { } } -// In debug builds (for testing etc.) create exclusion constraints, in -// release builds for production, skip them -#[cfg(debug_assertions)] -const CREATE_EXCLUSION_CONSTRAINT: bool = true; -#[cfg(not(debug_assertions))] -const CREATE_EXCLUSION_CONSTRAINT: bool = false; +table! { + __diesel_schema_migrations(version) { + version -> Text, + run_on -> Timestamp, + } +} + +lazy_static! 
{ + /// The name of the table in which Diesel records migrations + static ref MIGRATIONS_TABLE: SqlName = + SqlName::verbatim("__diesel_schema_migrations".to_string()); +} + +pub struct Locale { + collate: String, + ctype: String, + encoding: String, +} + +impl Locale { + /// Load locale information for current database + pub fn load(conn: &PgConnection) -> Result { + use diesel::dsl::sql; + use pg_database as db; + + let (collate, ctype, encoding) = db::table + .filter(db::datname.eq(sql("current_database()"))) + .select(( + db::datcollate, + db::datctype, + sql::("pg_encoding_to_char(encoding)::text"), + )) + .get_result::<(String, String, String)>(conn)?; + Ok(Locale { + collate, + ctype, + encoding, + }) + } + + pub fn suitable(&self) -> Result<(), String> { + if self.collate != "C" { + return Err(format!( + "database collation is `{}` but must be `C`", + self.collate + )); + } + if self.ctype != "C" { + return Err(format!( + "database ctype is `{}` but must be `C`", + self.ctype + )); + } + if self.encoding != "UTF8" { + return Err(format!( + "database encoding is `{}` but must be `UTF8`", + self.encoding + )); + } + Ok(()) + } +} /// Information about what tables and columns we have in the database #[derive(Debug, Clone)] pub struct Catalog { pub site: Arc, text_columns: HashMap>, + pub use_poi: bool, /// Whether `bytea` columns are indexed with just a prefix (`true`) or /// in their entirety. This influences both DDL generation and how /// queries are generated pub use_bytea_prefix: bool, + + /// Set of tables which have an explicit causality region column. + pub(crate) entities_with_causality_region: BTreeSet, } impl Catalog { @@ -72,6 +188,7 @@ impl Catalog { conn: &PgConnection, site: Arc, use_bytea_prefix: bool, + entities_with_causality_region: Vec, ) -> Result { let text_columns = get_text_columns(conn, &site.namespace)?; let use_poi = supports_proof_of_indexing(conn, &site.namespace)?; @@ -80,11 +197,15 @@ impl Catalog { text_columns, use_poi, use_bytea_prefix, + entities_with_causality_region: entities_with_causality_region.into_iter().collect(), }) } /// Return a new catalog suitable for creating a new subgraph - pub fn for_creation(site: Arc) -> Self { + pub fn for_creation( + site: Arc, + entities_with_causality_region: BTreeSet, + ) -> Self { Catalog { site, text_columns: HashMap::default(), @@ -93,18 +214,23 @@ impl Catalog { // DDL generation creates indexes for prefixes of bytes columns // see: attr-bytea-prefix use_bytea_prefix: true, + entities_with_causality_region, } } /// Make a catalog as if the given `schema` did not exist in the database /// yet. 
This function should only be used in situations where a database /// connection is definitely not available, such as in unit tests - pub fn for_tests(site: Arc) -> Result { + pub fn for_tests( + site: Arc, + entities_with_causality_region: BTreeSet, + ) -> Result { Ok(Catalog { site, text_columns: HashMap::default(), use_poi: false, use_bytea_prefix: true, + entities_with_causality_region, }) } @@ -116,12 +242,6 @@ impl Catalog { .map(|cols| cols.contains(column.as_str())) .unwrap_or(false) } - - /// Whether to create exclusion indexes; if false, create gist indexes - /// w/o an exclusion constraint - pub fn create_exclusion_constraint(&self) -> bool { - CREATE_EXCLUSION_CONSTRAINT - } } fn get_text_columns( @@ -154,9 +274,10 @@ fn get_text_columns( Ok(map) } -pub fn supports_proof_of_indexing( - conn: &diesel::pg::PgConnection, - namespace: &Namespace, +pub fn table_exists( + conn: &PgConnection, + namespace: &str, + table: &SqlName, ) -> Result { #[derive(Debug, QueryableByName)] struct Table { @@ -167,10 +288,20 @@ pub fn supports_proof_of_indexing( let query = "SELECT table_name FROM information_schema.tables WHERE table_schema=$1 AND table_name=$2"; let result: Vec = diesel::sql_query(query) - .bind::(namespace.as_str()) - .bind::(POI_TABLE) + .bind::(namespace) + .bind::(table.as_str()) .load(conn)?; - Ok(result.len() > 0) + Ok(!result.is_empty()) +} + +pub fn supports_proof_of_indexing( + conn: &diesel::pg::PgConnection, + namespace: &Namespace, +) -> Result { + lazy_static! { + static ref POI_TABLE_NAME: SqlName = SqlName::verbatim(POI_TABLE.to_owned()); + } + table_exists(conn, namespace.as_str(), &POI_TABLE_NAME) } pub fn current_servers(conn: &PgConnection) -> Result, StoreError> { @@ -216,7 +347,7 @@ pub fn has_namespace(conn: &PgConnection, namespace: &Namespace) -> Result(conn)?) } @@ -251,6 +382,22 @@ pub fn recreate_schema(conn: &PgConnection, nsp: &str) -> Result<(), StoreError> Ok(conn.batch_execute(&query)?) } +/// Drop the schema `nsp` and all its contents if it exists +pub fn drop_schema(conn: &PgConnection, nsp: &str) -> Result<(), StoreError> { + let query = format!("drop schema if exists {nsp} cascade;", nsp = nsp); + Ok(conn.batch_execute(&query)?) +} + +pub fn migration_count(conn: &PgConnection) -> Result { + use __diesel_schema_migrations as m; + + if !table_exists(conn, NAMESPACE_PUBLIC, &*MIGRATIONS_TABLE)? 
{ + return Ok(0); + } + + m::table.count().get_result(conn).map_err(StoreError::from) +} + pub fn account_like(conn: &PgConnection, site: &Site) -> Result, StoreError> { use table_stats as ts; let names = ts::table @@ -501,3 +648,157 @@ pub(crate) fn drop_index( .map_err::(Into::into)?; Ok(()) } + +pub fn stats(conn: &PgConnection, namespace: &Namespace) -> Result, StoreError> { + #[derive(Queryable, QueryableByName)] + pub struct DbStats { + #[sql_type = "Integer"] + pub entities: i32, + #[sql_type = "Integer"] + pub versions: i32, + #[sql_type = "Text"] + pub tablename: String, + /// The ratio `entities / versions` + #[sql_type = "Double"] + pub ratio: f64, + } + + impl From for VersionStats { + fn from(s: DbStats) -> Self { + VersionStats { + entities: s.entities, + versions: s.versions, + tablename: s.tablename, + ratio: s.ratio, + } + } + } + + // Get an estimate of the number of rows (pg_class.reltuples) and number of + // distinct entities (based on the planner's idea of how many distinct + // values there are in the `id` column). See the [Postgres + // docs](https://www.postgresql.org/docs/current/view-pg-stats.html) for + // the precise meaning of n_distinct. + let query = format!( + "select case when s.n_distinct < 0 then (- s.n_distinct * c.reltuples)::int4 + else s.n_distinct::int4 + end as entities, + c.reltuples::int4 as versions, + c.relname as tablename, + case when c.reltuples = 0 then 0::float8 + when s.n_distinct < 0 then (-s.n_distinct)::float8 + else greatest(s.n_distinct, 1)::float8 / c.reltuples::float8 + end as ratio + from pg_namespace n, pg_class c, pg_stats s + where n.nspname = $1 + and c.relnamespace = n.oid + and s.schemaname = n.nspname + and s.attname = 'id' + and c.relname = s.tablename + order by c.relname" + ); + + let stats = sql_query(query) + .bind::(namespace.as_str()) + .load::(conn) + .map_err(StoreError::from)?; + + Ok(stats.into_iter().map(|s| s.into()).collect()) +} + +/// Return by how much the slowest replica connected to the database `conn` +/// is lagging. The returned value has millisecond precision.
If the +/// database has no replicas, return `0` +pub(crate) fn replication_lag(conn: &PgConnection) -> Result { + #[derive(Queryable, QueryableByName)] + struct Lag { + #[sql_type = "Nullable"] + ms: Option, + } + + let lag = sql_query( + "select (extract(epoch from max(greatest(write_lag, flush_lag, replay_lag)))*1000)::int as ms \ + from pg_stat_replication", + ) + .get_result::(conn)?; + + let lag = lag + .ms + .map(|ms| if ms <= 0 { 0 } else { ms as u64 }) + .unwrap_or(0); + + Ok(Duration::from_millis(lag)) +} + +pub(crate) fn cancel_vacuum(conn: &PgConnection, namespace: &Namespace) -> Result<(), StoreError> { + sql_query( + "select pg_cancel_backend(v.pid) \ + from pg_stat_progress_vacuum v, \ + pg_class c, \ + pg_namespace n \ + where v.relid = c.oid \ + and c.relnamespace = n.oid \ + and n.nspname = $1", + ) + .bind::(namespace) + .execute(conn)?; + Ok(()) +} + +pub(crate) fn default_stats_target(conn: &PgConnection) -> Result { + #[derive(Queryable, QueryableByName)] + struct Target { + #[sql_type = "Integer"] + setting: i32, + } + + let target = + sql_query("select setting::int from pg_settings where name = 'default_statistics_target'") + .get_result::(conn)?; + Ok(target.setting) +} + +pub(crate) fn stats_targets( + conn: &PgConnection, + namespace: &Namespace, +) -> Result>, StoreError> { + use pg_attribute as a; + use pg_class as c; + use pg_namespace as n; + + let targets = c::table + .inner_join(n::table) + .inner_join(a::table) + .filter(c::kind.eq("r")) + .filter(n::name.eq(namespace.as_str())) + .filter(a::num.ge(1)) + .select((c::name, a::name, a::stats_target)) + .load::<(String, String, i32)>(conn)? + .into_iter() + .map(|(table, column, target)| (SqlName::from(table), SqlName::from(column), target)); + + let map = targets.into_iter().fold( + BTreeMap::>::new(), + |mut map, (table, column, target)| { + map.entry(table).or_default().insert(column, target); + map + }, + ); + Ok(map) +} + +pub(crate) fn set_stats_target( + conn: &PgConnection, + namespace: &Namespace, + table: &SqlName, + columns: &[&SqlName], + target: i32, +) -> Result<(), StoreError> { + let columns = columns + .iter() + .map(|column| format!("alter column {} set statistics {}", column.quoted(), target)) + .join(", "); + let query = format!("alter table {}.{} {}", namespace, table.quoted(), columns); + conn.batch_execute(&query)?; + Ok(()) +} diff --git a/store/postgres/src/chain_store.rs b/store/postgres/src/chain_store.rs index 74a9101bed8..fcf2e22a8b9 100644 --- a/store/postgres/src/chain_store.rs +++ b/store/postgres/src/chain_store.rs @@ -3,13 +3,13 @@ use diesel::prelude::*; use diesel::r2d2::{ConnectionManager, PooledConnection}; use diesel::sql_types::Text; use diesel::{insert_into, update}; +use graph::parking_lot::RwLock; use std::{ collections::HashMap, convert::{TryFrom, TryInto}, iter::FromIterator, sync::Arc, - time::Duration, }; use graph::blockchain::{Block, BlockHash, ChainIdentifier}; @@ -20,9 +20,9 @@ use graph::prelude::{ BlockNumber, BlockPtr, CachedEthereumCall, CancelableError, ChainStore as ChainStoreTrait, Error, EthereumCallCache, StoreError, }; -use graph::util::timed_cache::TimedCache; use graph::{constraint_violation, ensure}; +use self::recent_blocks_cache::RecentBlocksCache; use crate::{ block_store::ChainStatus, chain_head_listener::ChainHeadUpdateSender, connection_pool::ConnectionPool, @@ -71,11 +71,14 @@ mod data { }; use std::fmt; use std::iter::FromIterator; + use std::str::FromStr; use std::{convert::TryFrom, io::Write}; use 
crate::transaction_receipt::RawTransactionReceipt; - pub(crate) const ETHEREUM_BLOCKS_TABLE_NAME: &'static str = "public.ethereum_blocks"; + pub(crate) const ETHEREUM_BLOCKS_TABLE_NAME: &str = "public.ethereum_blocks"; + + pub(crate) const ETHEREUM_CALL_CACHE_TABLE_NAME: &str = "public.eth_call_cache"; mod public { pub(super) use super::super::public::ethereum_networks; @@ -404,6 +407,15 @@ mod data { Ok(()) } + fn truncate_call_cache(&self, conn: &PgConnection) -> Result<(), StoreError> { + let table_name = match &self { + Storage::Shared => ETHEREUM_CALL_CACHE_TABLE_NAME, + Storage::Private(Schema { call_cache, .. }) => &call_cache.qname, + }; + conn.batch_execute(&format!("truncate table {} restart identity", table_name))?; + Ok(()) + } + /// Insert a block. If the table already contains a block with the /// same hash, then overwrite that block since it may be adding /// transaction receipts. If `overwrite` is `true`, overwrite a @@ -506,7 +518,7 @@ mod data { .select(sql::("coalesce(data -> 'block', data)")) .filter(b::network_name.eq(chain)) .filter(b::hash.eq(any(Vec::from_iter( - hashes.into_iter().map(|h| format!("{:x}", h)), + hashes.iter().map(|h| format!("{:x}", h)), )))) .load::(conn) } @@ -549,7 +561,7 @@ mod data { .filter(blocks.number().eq(number as i64)) .get_results::>(conn)? .into_iter() - .map(|hash| BlockHash::from(hash)) + .map(BlockHash::from) .collect::>()), } } @@ -784,8 +796,8 @@ mod data { conn: &PgConnection, block_ptr: BlockPtr, offset: BlockNumber, - ) -> Result, Error> { - let data = match self { + ) -> Result, Error> { + let data_and_hash = match self { Storage::Shared => { const ANCESTOR_SQL: &str = " with recursive ancestors(block_hash, block_offset) as ( @@ -810,12 +822,13 @@ mod data { match hash { None => None, - Some(hash) => Some( + Some(hash) => Some(( b::table - .filter(b::hash.eq(hash.hash)) + .filter(b::hash.eq(&hash.hash)) .select(b::data) .first::(conn)?, - ), + BlockHash::from_str(&hash.hash)?, + )), } } Storage::Private(Schema { blocks, .. }) => { @@ -843,13 +856,14 @@ mod data { .optional()?; match hash { None => None, - Some(hash) => Some( + Some(hash) => Some(( blocks .table() - .filter(blocks.hash().eq(hash.hash)) + .filter(blocks.hash().eq(&hash.hash)) .select(blocks.data()) .first::(conn)?, - ), + BlockHash::from(hash.hash), + )), } } }; @@ -860,15 +874,20 @@ mod data { // has a 'block' entry // // see also 7736e440-4c6b-11ec-8c4d-b42e99f52061 - let data = { + let data_and_ptr = { use graph::prelude::serde_json::json; - data.map(|data| match data.get("block") { - Some(_) => data, - None => json!({ "block": data, "transaction_receipts": [] }), + data_and_hash.map(|(data, hash)| { + ( + match data.get("block") { + Some(_) => data, + None => json!({ "block": data, "transaction_receipts": [] }), + }, + BlockPtr::new(hash, block_ptr.number - offset), + ) }) }; - Ok(data) + Ok(data_and_ptr) } pub(super) fn delete_blocks_before( @@ -1028,6 +1047,72 @@ mod data { .collect()) } + pub(super) fn clear_call_cache( + &self, + conn: &PgConnection, + from: Option, + to: Option, + ) -> Result<(), Error> { + if from.is_none() && to.is_none() { + // If both `from` and `to` arguments are equal to `None`, then truncation should be + // preferred over deletion as it is a faster operation. 
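Truncation wins here because Postgres `TRUNCATE` drops the table's data pages outright instead of scanning and deleting rows, and it leaves no dead tuples behind for `VACUUM`. A standalone sketch of the four cases this function distinguishes (the table name is hypothetical, and values are inlined where the real code binds `$1`/`$2`):

```rust
/// Model of the fast-path decision in `clear_call_cache`: with no
/// bounds, truncate; otherwise build a bounded DELETE.
fn clear_sql(table: &str, from: Option<i32>, to: Option<i32>) -> String {
    match (from, to) {
        // No bounds: TRUNCATE deallocates the table's pages outright,
        // leaves no dead tuples for VACUUM, and resets the identity
        // sequence; a full-table DELETE does none of that.
        (None, None) => format!("truncate table {table} restart identity"),
        (Some(f), None) => format!("delete from {table} where block_number >= {f}"),
        (None, Some(t)) => format!("delete from {table} where block_number <= {t}"),
        (Some(f), Some(t)) => format!(
            "delete from {table} where block_number >= {f} and block_number <= {t}"
        ),
    }
}

fn main() {
    assert_eq!(
        clear_sql("public.eth_call_cache", None, None),
        "truncate table public.eth_call_cache restart identity"
    );
    println!("{}", clear_sql("sgd42.call_cache", Some(100), Some(200)));
}
```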
+ self.truncate_call_cache(conn)?; + return Ok(()); + } + match self { + Storage::Shared => { + use public::eth_call_cache as cache; + let mut delete_stmt = diesel::delete(cache::table).into_boxed(); + if let Some(from) = from { + delete_stmt = delete_stmt.filter(cache::block_number.ge(from)); + } + if let Some(to) = to { + delete_stmt = delete_stmt.filter(cache::block_number.le(to)) + } + delete_stmt.execute(conn).map_err(Error::from)?; + Ok(()) + } + Storage::Private(Schema { call_cache, .. }) => match (from, to) { + // Because they are dynamically defined, our private call cache tables can't + // implement all the required traits for deletion. This means we can't use Diesel + // DSL with them and must rely on the `sql_query` function instead. + (Some(from), None) => { + let query = + format!("delete from {} where block_number >= $1", call_cache.qname); + sql_query(query) + .bind::(from) + .execute(conn) + .map_err(Error::from)?; + Ok(()) + } + (None, Some(to)) => { + let query = + format!("delete from {} where block_number <= $1", call_cache.qname); + sql_query(query) + .bind::(to) + .execute(conn) + .map_err(Error::from)?; + Ok(()) + } + (Some(from), Some(to)) => { + let query = format!( + "delete from {} where block_number >= $1 and block_number <= $2", + call_cache.qname + ); + sql_query(query) + .bind::(from) + .bind::(to) + .execute(conn) + .map_err(Error::from)?; + Ok(()) + } + (None, None) => { + unreachable!("truncation was handled at the beginning of this function"); + } + }, + } + } + pub(super) fn update_accessed_at( &self, conn: &PgConnection, @@ -1189,7 +1274,7 @@ mod data { let query = format!("delete from {}", qname); sql_query(query) .execute(conn) - .expect(&format!("Failed to delete {}", qname)); + .unwrap_or_else(|_| panic!("Failed to delete {}", qname)); } } } @@ -1267,7 +1352,13 @@ pub struct ChainStore { genesis_block_ptr: BlockPtr, status: ChainStatus, chain_head_update_sender: ChainHeadUpdateSender, - block_cache: TimedCache<&'static str, BlockPtr>, + // TODO: We currently only use this cache for + // [`ChainStore::ancestor_block`], but it could very well be expanded to + // also track the network's chain head and generally improve its hit rate. + // It is, however, quite challenging to keep the cache perfectly consistent + // with the database and to correctly implement invalidation. So, a + // conservative approach is acceptable. + recent_blocks_cache: RecentBlocksCache, } impl ChainStore { @@ -1278,6 +1369,7 @@ impl ChainStore { status: ChainStatus, chain_head_update_sender: ChainHeadUpdateSender, pool: ConnectionPool, + recent_blocks_cache_capacity: usize, ) -> Self { ChainStore { pool, @@ -1286,7 +1378,7 @@ impl ChainStore { genesis_block_ptr: BlockPtr::new(net_identifier.genesis_block_hash.clone(), 0), status, chain_head_update_sender, - block_cache: TimedCache::new(Duration::from_secs(5)), + recent_blocks_cache: RecentBlocksCache::new(recent_blocks_cache_capacity), } } @@ -1407,6 +1499,12 @@ impl ChainStoreTrait for ChainStore { } async fn upsert_block(&self, block: Arc) -> Result<(), Error> { + // We should always have the parent block available to us at this point. 
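Earlier in this hunk, the `Storage::Shared` arm composes its `DELETE` with Diesel's `into_boxed`, which erases the concrete query type so filters can be attached conditionally at runtime. A minimal sketch of that pattern, assuming the diesel 1.x API used throughout this diff and a stand-in table definition:

```rust
#[macro_use]
extern crate diesel;
use diesel::pg::PgConnection;
use diesel::prelude::*;

// Stand-in for the real `public::eth_call_cache` table.
table! {
    eth_call_cache (id) {
        id -> Integer,
        block_number -> Integer,
    }
}

/// Delete rows whose `block_number` falls inside the optional bounds.
fn delete_range(
    conn: &PgConnection,
    from: Option<i32>,
    to: Option<i32>,
) -> QueryResult<usize> {
    use self::eth_call_cache::dsl::*;

    // `into_boxed` erases the statement's concrete type so that zero,
    // one, or two filters can be added depending on runtime input.
    let mut stmt = diesel::delete(eth_call_cache).into_boxed();
    if let Some(from) = from {
        stmt = stmt.filter(block_number.ge(from));
    }
    if let Some(to) = to {
        stmt = stmt.filter(block_number.le(to));
    }
    stmt.execute(conn)
}
```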
+ if let Some(parent_hash) = block.parent_hash() { + self.recent_blocks_cache + .insert_block(block.ptr(), block.data().ok(), parent_hash); + } + let pool = self.pool.clone(); let network = self.chain.clone(); let storage = self.storage.clone(); @@ -1519,10 +1617,6 @@ impl ChainStoreTrait for ChainStore { _ => unreachable!(), }) .and_then(|opt: Option| opt) - .map(|head| { - self.block_cache.set("head", Arc::new(head.clone())); - head - }) }) .map_err(|e| CancelableError::from(StoreError::from(e))) }) @@ -1530,8 +1624,8 @@ impl ChainStoreTrait for ChainStore { } async fn cached_head_ptr(self: Arc) -> Result, Error> { - match self.block_cache.get("head") { - Some(head) => Ok(Some(head.as_ref().clone())), + match self.recent_blocks_cache.chain_head_ptr() { + Some(head) => Ok(Some(head)), None => self.chain_head_ptr().await, } } @@ -1609,15 +1703,24 @@ impl ChainStoreTrait for ChainStore { block_ptr.hash_hex() ); + // Check the local cache first. + if let Some(data) = self.recent_blocks_cache.get_block(&block_ptr, offset) { + return Ok(data.1.clone()); + } + + let block_ptr_clone = block_ptr.clone(); + let chain_store = self.cheap_clone(); Ok(self - .cheap_clone() .pool .with_conn(move |conn, _| { - self.storage - .ancestor_block(&conn, block_ptr, offset) - .map_err(|e| CancelableError::from(StoreError::from(e))) + chain_store + .storage + .ancestor_block(conn, block_ptr_clone, offset) + .map_err(StoreError::from) + .map_err(CancelableError::from) }) - .await?) + .await? + .map(|b| b.0)) } fn cleanup_cached_blocks( @@ -1632,6 +1735,8 @@ impl ChainStoreTrait for ChainStore { block: i32, } + self.recent_blocks_cache.clear(); + // Remove all blocks from the cache that are behind the slowest // subgraph's head block, but retain the genesis block. We stay // behind the slowest subgraph so that we do not interfere with its @@ -1708,13 +1813,18 @@ impl ChainStoreTrait for ChainStore { self.pool .with_conn(move |conn, _| { storage - .block_number(&conn, &hash) + .block_number(conn, &hash) .map(|opt| opt.map(|(number, timestamp)| (chain.clone(), number, timestamp))) - .map_err(|e| StoreError::from(e).into()) + .map_err(|e| e.into()) }) .await } + async fn clear_call_cache(&self, from: Option, to: Option) -> Result<(), Error> { + let conn = self.get_conn()?; + self.storage.clear_call_cache(&conn, from, to) + } + async fn transaction_receipts_in_block( &self, block_hash: &H256, @@ -1724,13 +1834,172 @@ impl ChainStoreTrait for ChainStore { let block_hash = block_hash.to_owned(); pool.with_conn(move |conn, _| { storage - .find_transaction_receipts_in_block(&conn, block_hash) + .find_transaction_receipts_in_block(conn, block_hash) .map_err(|e| StoreError::from(e).into()) }) .await } } +mod recent_blocks_cache { + use super::*; + use std::collections::BTreeMap; + + struct CachedBlock { + ptr: BlockPtr, + data: Option, + parent_hash: BlockHash, + } + + struct Inner { + // Note: we only ever store blocks in this cache that have a continuous + // line of ancestry between each other. Line of ancestry is verified by + // comparing parent hashes. Because of NEAR, however, we cannot + // guarantee that there are no block number gaps, as block numbers are + // not strictly continuous: + // #14 (Hash ABC1, Parent XX) -> #17 (Hash EBD2, Parent ABC1) + blocks: BTreeMap, + // We only store these many blocks. 
+ capacity: usize, + } + + impl Inner { + fn get_block( + &self, + child: &BlockPtr, + offset: BlockNumber, + ) -> Option<(&BlockPtr, Option<&json::Value>)> { + // Before we can go find the ancestor, we need to make sure that + // we're looking for the ancestor of the right block, i.e. check if + // the hash (and number) of the child matches. + let child_is_cached = &self.blocks.get(&child.number)?.ptr == child; + + if child_is_cached { + let ancestor_block_number = child.number - offset; + let block = self.blocks.get(&ancestor_block_number)?; + Some((&block.ptr, block.data.as_ref())) + } else { + None + } + } + + fn chain_head(&self) -> Option<&BlockPtr> { + self.blocks.last_key_value().map(|b| &b.1.ptr) + } + + fn earliest_block(&self) -> Option<&CachedBlock> { + self.blocks.first_key_value().map(|b| b.1) + } + + fn evict_if_necessary(&mut self) { + while self.blocks.len() > self.capacity { + self.blocks.pop_first(); + } + } + + fn insert_block( + &mut self, + ptr: BlockPtr, + data: Option, + parent_hash: BlockHash, + ) { + fn is_parent_of(parent: &BlockPtr, child: &CachedBlock) -> bool { + child.parent_hash == parent.hash + } + + let block = CachedBlock { + ptr, + data, + parent_hash, + }; + + let Some(chain_head) = self.chain_head() else { + // We don't have anything in the cache, so we're free to store + // everything we want. + self.blocks.insert(block.ptr.number, block); + return; + }; + + if is_parent_of(chain_head, &block) { + // We have a new chain head that is a direct child of our + // previous chain head, so we get to keep all items in the + // cache. + self.blocks.insert(block.ptr.number, block); + } else if block.ptr.number > chain_head.number { + // We have a new chain head, but it's not a direct child of + // our previous chain head. This means that we must + // invalidate all the items in the cache before inserting + // this block. + self.blocks.clear(); + self.blocks.insert(block.ptr.number, block); + } else { + // Unwrap: we have checked already that the cache is not empty, + // at the beginning of this function body. + let earliest_block = self.earliest_block().unwrap(); + // Let's check if this is the parent of the earliest block in the + // cache. + if is_parent_of(&block.ptr, earliest_block) { + self.blocks.insert(block.ptr.number, block); + } + } + + self.evict_if_necessary(); + } + } + + /// We cache the most recent blocks in memory to avoid overloading the + /// database with unnecessary queries close to the chain head. We invalidate + /// blocks whenever the chain head advances. + pub struct RecentBlocksCache { + // We protect everything with a global `RwLock` to avoid data races. Ugly... + inner: RwLock, + } + + impl RecentBlocksCache { + pub fn new(capacity: usize) -> Self { + RecentBlocksCache { + inner: RwLock::new(Inner { + blocks: BTreeMap::new(), + capacity, + }), + } + } + + pub fn chain_head_ptr(&self) -> Option { + let inner = self.inner.read(); + inner.chain_head().cloned() + } + + pub fn clear(&self) { + self.inner.write().blocks.clear() + } + + pub fn get_block( + &self, + child: &BlockPtr, + offset: BlockNumber, + ) -> Option<(BlockPtr, Option)> { + self.inner + .read() + .get_block(child, offset) + .map(|b| (b.0.clone(), b.1.cloned())) + } + + /// Tentatively caches the `ancestor` of a [`BlockPtr`] (`child`), together with + /// its associated `data`. Note that for this to work, `child` must be + /// in the cache already. The first block in the cache should be + /// inserted via [`RecentBlocksCache::set_chain_head`]. 
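To make the insertion rules above concrete, here is a self-contained model using plain structs in place of `BlockPtr`/`BlockHash`; it shows the three cases (extend the head, invalidate on reorg, extend backwards) plus eviction:

```rust
use std::collections::BTreeMap;

// Ancestry is tracked purely through parent hashes, never through
// consecutive numbers, since chains like NEAR can skip numbers.
#[derive(Clone, Debug, PartialEq)]
struct Block {
    number: i32,
    hash: &'static str,
    parent: &'static str,
}

struct Cache {
    blocks: BTreeMap<i32, Block>,
    capacity: usize,
}

impl Cache {
    fn insert(&mut self, b: Block) {
        let head = self.blocks.last_key_value().map(|(_, h)| h.clone());
        match head {
            // Empty cache: accept anything.
            None => {
                self.blocks.insert(b.number, b);
            }
            // Direct child of the current head: the ancestry line grows.
            Some(head) if b.parent == head.hash => {
                self.blocks.insert(b.number, b);
            }
            // A newer head that does not chain onto the old one (reorg):
            // every cached ancestor may now be wrong, so start over.
            Some(head) if b.number > head.number => {
                self.blocks.clear();
                self.blocks.insert(b.number, b);
            }
            // Otherwise only accept the parent of the earliest cached
            // block, which extends the continuous line backwards.
            Some(_) => {
                let extends = self
                    .blocks
                    .first_key_value()
                    .map_or(false, |(_, e)| e.parent == b.hash);
                if extends {
                    self.blocks.insert(b.number, b);
                }
            }
        }
        // Evict from the oldest end once over capacity.
        while self.blocks.len() > self.capacity {
            self.blocks.pop_first();
        }
    }
}

fn main() {
    let mut c = Cache { blocks: BTreeMap::new(), capacity: 10 };
    c.insert(Block { number: 14, hash: "ABC1", parent: "XX" });
    // A gap in numbers is fine as long as the parent hash matches.
    c.insert(Block { number: 17, hash: "EBD2", parent: "ABC1" });
    assert_eq!(c.blocks.len(), 2);
    // A head that does not descend from #17 wipes the cache.
    c.insert(Block { number: 18, hash: "F00D", parent: "DEAD" });
    assert_eq!(c.blocks.len(), 1);
}
```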
+ pub fn insert_block( + &self, + ptr: BlockPtr, + data: Option, + parent_hash: BlockHash, + ) { + self.inner.write().insert_block(ptr, data, parent_hash) + } + } +} + fn try_parse_timestamp(ts: Option) -> Result, StoreError> { let ts = match ts { Some(str) => str, @@ -1783,7 +2052,7 @@ impl EthereumCallCache for ChainStore { fn get_calls_in_block(&self, block: BlockPtr) -> Result, Error> { let conn = &*self.get_conn()?; - conn.transaction::<_, Error, _>(|| Ok(self.storage.get_calls_in_block(conn, block)?)) + conn.transaction::<_, Error, _>(|| self.storage.get_calls_in_block(conn, block)) } fn set_call( diff --git a/store/postgres/src/connection_pool.rs b/store/postgres/src/connection_pool.rs index d1f48a5bf48..f688db12418 100644 --- a/store/postgres/src/connection_pool.rs +++ b/store/postgres/src/connection_pool.rs @@ -25,7 +25,7 @@ use graph::{ use std::fmt::{self, Write}; use std::sync::atomic::{AtomicBool, Ordering}; -use std::sync::Arc; +use std::sync::{Arc, Mutex}; use std::time::Duration; use std::{collections::HashMap, sync::RwLock}; @@ -86,10 +86,15 @@ impl ForeignServer { let password = String::from_utf8( config .get_password() - .ok_or_else(|| anyhow!("could not find user in `{}`", SafeDisplay(postgres_url)))? + .ok_or_else(|| { + anyhow!( + "could not find password in `{}`; you must provide one.", + SafeDisplay(postgres_url) + ) + })? .into(), )?; - let port = config.get_ports().get(0).cloned().unwrap_or(5432u16); + let port = config.get_ports().first().cloned().unwrap_or(5432u16); let dbname = config .get_dbname() .map(|s| s.to_string()) @@ -158,9 +163,6 @@ impl ForeignServer { /// Map key tables from the primary into our local schema. If we are the /// primary, set them up as views. - /// - /// We recreate this mapping on every server start so that migrations that - /// change one of the mapped tables actually show up in the imported tables fn map_primary(conn: &PgConnection, shard: &Shard) -> Result<(), StoreError> { catalog::recreate_schema(conn, Self::PRIMARY_PUBLIC)?; @@ -226,7 +228,7 @@ const FDW_IDLE_TIMEOUT: Duration = Duration::from_secs(60); enum PoolState { /// A connection pool, and all the servers for which we need to /// establish fdw mappings when we call `setup` on the pool - Created(Arc, Arc>), + Created(Arc, Arc), /// The pool has been successfully set up Ready(Arc), /// The pool has been disabled by setting its size to 0 @@ -300,7 +302,7 @@ impl PoolStateTracker { } impl ConnectionPool { - pub fn create( + fn create( shard_name: &str, pool_name: PoolName, postgres_url: String, @@ -308,7 +310,7 @@ impl ConnectionPool { fdw_pool_size: Option, logger: &Logger, registry: Arc, - servers: Arc>, + coord: Arc, ) -> ConnectionPool { let state_tracker = PoolStateTracker::new(); let shard = @@ -330,7 +332,7 @@ impl ConnectionPool { if pool_name.is_replica() { PoolState::Ready(Arc::new(pool)) } else { - PoolState::Created(Arc::new(pool), servers) + PoolState::Created(Arc::new(pool), coord) } } }; @@ -676,7 +678,7 @@ impl HandleEvent for EventHandler { #[derive(Clone)] pub struct PoolInner { logger: Logger, - shard: Shard, + pub shard: Shard, pool: Pool>, // A separate pool for connections that will use foreign data wrappers. 
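On the read side, the cache answers `ancestor_block` only when the requested child itself is cached with a matching hash, and the ancestor is simply the entry at `child.number - offset`. A standalone model of that arithmetic (`Ptr` stands in for `BlockPtr`):

```rust
use std::collections::BTreeMap;

#[derive(Clone, Debug, PartialEq)]
struct Ptr {
    number: i32,
    hash: &'static str,
}

/// A cache hit requires that the cached entry at `child.number` is the
/// very block we were asked about, so a reorged-away sibling can never
/// be used to answer an ancestor query.
fn ancestor(cache: &BTreeMap<i32, Ptr>, child: &Ptr, offset: i32) -> Option<Ptr> {
    if cache.get(&child.number)? != child {
        return None;
    }
    cache.get(&(child.number - offset)).cloned()
}

fn main() {
    let mut cache = BTreeMap::new();
    for (n, h) in [(97, "a"), (98, "b"), (99, "c"), (100, "d")] {
        cache.insert(n, Ptr { number: n, hash: h });
    }
    let child = Ptr { number: 100, hash: "d" };
    assert_eq!(ancestor(&cache, &child, 3).unwrap().hash, "a");

    // Same number, different hash: this sibling is not in our line of
    // ancestry, so the cache must refuse to answer.
    let sibling = Ptr { number: 100, hash: "x" };
    assert!(ancestor(&cache, &sibling, 3).is_none());
}
```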
// Once such a connection accesses a foreign table, Postgres keeps a @@ -968,7 +970,7 @@ impl PoolInner { /// # Panics /// /// If any errors happen during the migration, the process panics - pub fn setup(&self, servers: Arc>) -> Result<(), StoreError> { + fn setup(&self, coord: Arc) -> Result<(), StoreError> { fn die(logger: &Logger, msg: &'static str, err: &dyn std::fmt::Display) -> ! { crit!(logger, "{}", msg; "error" => format!("{:#}", err)); panic!("{}: {}", msg, err); @@ -977,17 +979,54 @@ impl PoolInner { let pool = self.clone(); let conn = self.get().map_err(|_| StoreError::DatabaseUnavailable)?; + let start = Instant::now(); + advisory_lock::lock_migration(&conn) .unwrap_or_else(|err| die(&pool.logger, "failed to get migration lock", &err)); + // This code can cause a race in database setup: if pool A has had + // schema changes and pool B then tries to map tables from pool A, + // but does so before the concurrent thread running this code for + // pool B has at least finished `configure_fdw`, mapping tables will + // fail. In that case, the node must be restarted. The restart is + // guaranteed because this failure will lead to a panic in the setup + // for pool A + // + // This code can also leave the table mappings in a state where they + // have not been updated if the process is killed after migrating + // the schema but before finishing remapping in all shards. + // Addressing that would require keeping track of the need to remap + // in the database instead of just in memory let result = pool - .configure_fdw(servers.as_ref()) + .configure_fdw(coord.servers.as_ref()) .and_then(|()| migrate_schema(&pool.logger, &conn)) - .and_then(|()| pool.map_primary()) - .and_then(|()| pool.map_metadata(servers.as_ref())); + .and_then(|had_migrations| { + if had_migrations { + coord.propagate_schema_change(&self.shard) + } else { + Ok(()) + } + }); + debug!(&pool.logger, "Release migration lock"); advisory_lock::unlock_migration(&conn).unwrap_or_else(|err| { die(&pool.logger, "failed to release migration lock", &err); }); result.unwrap_or_else(|err| die(&pool.logger, "migrations failed", &err)); + + // Locale check + if let Err(msg) = catalog::Locale::load(&conn)?.suitable() { + if &self.shard == &*PRIMARY_SHARD && primary::is_empty(&conn)? { + die( + &pool.logger, + "Database does not use C locale. \ + Please check the graph-node documentation for how to set up the database locale", + &msg, + ); + } else { + warn!(pool.logger, "{}.\nPlease check the graph-node documentation for how to set up the database locale", msg); + } + } + + debug!(&pool.logger, "Setup finished"; "setup_time_s" => start.elapsed().as_secs()); Ok(()) } @@ -1018,17 +1057,6 @@ impl PoolInner { }) } - /// Map key tables from the primary into our local schema. If we are the - /// primary, set them up as views. - /// - /// We recreate this mapping on every server start so that migrations that - /// change one of the mapped tables actually show up in the imported tables - fn map_primary(&self) -> Result<(), StoreError> { - info!(&self.logger, "Mapping primary"); - let conn = self.get()?; - conn.transaction(|| ForeignServer::map_primary(&conn, &self.shard)) - } - /// Copy the data from key tables in the primary into our local schema /// so it can be used as a fallback when the primary goes down pub async fn mirror_primary_tables(&self) -> Result<(), StoreError> { @@ -1043,17 +1071,25 @@ impl PoolInner { .await } - // Map some tables from the `subgraphs` metadata schema from foreign - // servers to ourselves. 
The mapping is recreated on every server start - // so that we pick up possible schema changes in the mappings - fn map_metadata(&self, servers: &[ForeignServer]) -> Result<(), StoreError> { - let conn = self.get()?; - conn.transaction(|| { - for server in servers.iter().filter(|server| server.shard != self.shard) { - server.map_metadata(&conn)?; - } - Ok(()) - }) + // The foreign server `server` had schema changes, and we therefore need + // to remap anything that we are importing via fdw to make sure we are + // using this updated schema + pub fn remap(&self, server: &ForeignServer) -> Result<(), StoreError> { + if &server.shard == &*PRIMARY_SHARD { + info!(&self.logger, "Mapping primary"); + let conn = self.get()?; + conn.transaction(|| ForeignServer::map_primary(&conn, &self.shard))?; + } + if &server.shard != &self.shard { + info!( + &self.logger, + "Mapping metadata from {}", + server.shard.as_str() + ); + let conn = self.get()?; + conn.transaction(|| server.map_metadata(&conn))?; + } + Ok(()) } } @@ -1064,19 +1100,22 @@ embed_migrations!("./migrations"); /// When multiple `graph-node` processes start up at the same time, we ensure /// that they do not run migrations in parallel by using `blocking_conn` to /// serialize them. The `conn` is used to run the actual migration. -fn migrate_schema(logger: &Logger, conn: &PgConnection) -> Result<(), StoreError> { +fn migrate_schema(logger: &Logger, conn: &PgConnection) -> Result { // Collect migration logging output let mut output = vec![]; + let old_count = catalog::migration_count(conn)?; + info!(logger, "Running migrations"); let result = embedded_migrations::run_with_output(conn, &mut output); info!(logger, "Migrations finished"); + let had_migrations = catalog::migration_count(conn)? != old_count; + // If there was any migration output, log it now let msg = String::from_utf8(output).unwrap_or_else(|_| String::from("")); let msg = msg.trim(); - let has_output = !msg.is_empty(); - if has_output { + if !msg.is_empty() { let msg = msg.replace('\n', " "); if let Err(e) = result { error!(logger, "Postgres migration error"; "output" => msg); @@ -1086,13 +1125,98 @@ fn migrate_schema(logger: &Logger, conn: &PgConnection) -> Result<(), StoreError } } - if has_output { - // We take getting output as a signal that a migration was actually - // run, which is not easy to tell from the Diesel API, and reset the - // query statistics since a schema change makes them not all that - // useful. An error here is not serious and can be ignored. + if had_migrations { + // Reset the query statistics since a schema change makes them not + // all that useful. An error here is not serious and can be ignored. 
conn.batch_execute("select pg_stat_statements_reset()").ok(); } - Ok(()) + Ok(had_migrations) +} + +/// Helper to coordinate propagating schema changes from the database that +/// changes schema to all other shards so they can update their fdw mappings +/// of tables imported from that shard +pub struct PoolCoordinator { + pools: Mutex>>, + servers: Arc>, +} + +impl PoolCoordinator { + pub fn new(servers: Arc>) -> Self { + Self { + pools: Mutex::new(HashMap::new()), + servers, + } + } + + pub fn create_pool( + self: Arc, + logger: &Logger, + name: &str, + pool_name: PoolName, + postgres_url: String, + pool_size: u32, + fdw_pool_size: Option, + registry: Arc, + ) -> ConnectionPool { + let is_writable = !pool_name.is_replica(); + + let pool = ConnectionPool::create( + name, + pool_name, + postgres_url, + pool_size, + fdw_pool_size, + logger, + registry, + self.cheap_clone(), + ); + + // Ignore non-writable pools (replicas), there is no need (and no + // way) to coordinate schema changes with them + if is_writable { + // It is safe to take this lock here since nobody has seen the pool + // yet. We remember the `PoolInner` so that later, when we have to + // call `remap()`, we do not have to take this lock as that will be + // already held in `get_ready()` + match &*pool.inner.lock(logger) { + PoolState::Created(inner, _) | PoolState::Ready(inner) => { + self.pools + .lock() + .unwrap() + .insert(pool.shard.clone(), inner.clone()); + } + PoolState::Disabled => { /* nothing to do */ } + } + } + pool + } + + /// Propagate changes to the schema in `shard` to all other pools. Those + /// other pools will then recreate any tables that they imported from + /// `shard` + fn propagate_schema_change(&self, shard: &Shard) -> Result<(), StoreError> { + let server = self + .servers + .iter() + .find(|server| &server.shard == shard) + .ok_or_else(|| constraint_violation!("unknown shard {shard}"))?; + + for pool in self.pools.lock().unwrap().values() { + if let Err(e) = pool.remap(server) { + error!(pool.logger, "Failed to map imports from {}", server.shard; "error" => e.to_string()); + return Err(e); + } + } + Ok(()) + } + + pub fn pools(&self) -> Vec> { + self.pools.lock().unwrap().values().cloned().collect() + } + + pub fn servers(&self) -> Arc> { + self.servers.clone() + } } diff --git a/store/postgres/src/copy.rs b/store/postgres/src/copy.rs index 9f8658098d5..44741e61d31 100644 --- a/store/postgres/src/copy.rs +++ b/store/postgres/src/copy.rs @@ -13,6 +13,7 @@ //! `graph-node` was restarted while the copy was running. use std::{ convert::TryFrom, + io::Write, sync::Arc, time::{Duration, Instant}, }; @@ -20,20 +21,24 @@ use std::{ use diesel::{ dsl::sql, insert_into, + pg::Pg, r2d2::{ConnectionManager, PooledConnection}, - select, sql_query, - sql_types::Integer, + select, + serialize::Output, + sql_query, + sql_types::{BigInt, Integer}, + types::{FromSql, ToSql}, update, Connection as _, ExpressionMethods, OptionalExtension, PgConnection, QueryDsl, RunQueryDsl, }; use graph::{ components::store::EntityType, constraint_violation, - prelude::{info, o, warn, BlockNumber, BlockPtr, Logger, StoreError}, + prelude::{info, o, warn, BlockNumber, BlockPtr, Logger, StoreError, ENV_VARS}, }; use crate::{ - advisory_lock, + advisory_lock, catalog, dynds::DataSourcesTable, primary::{DeploymentId, Site}, }; @@ -46,9 +51,19 @@ const INITIAL_BATCH_SIZE: i64 = 10_000; /// arrays can be large and large arrays will slow down copying a lot. 
We /// therefore tread lightly in that case const INITIAL_BATCH_SIZE_LIST: i64 = 100; -const TARGET_DURATION: Duration = Duration::from_secs(5 * 60); + const LOG_INTERVAL: Duration = Duration::from_secs(3 * 60); +/// If replicas are lagging by more than this, the copying code will pause +/// for a while to allow replicas to catch up +const MAX_REPLICATION_LAG: Duration = Duration::from_secs(60); +/// If replicas need to catch up, do not resume copying until the lag is +/// less than this +const ACCEPTABLE_REPLICATION_LAG: Duration = Duration::from_secs(30); +/// When replicas are lagging too much, sleep for this long before checking +/// the lag again +const REPLICATION_SLEEP: Duration = Duration::from_secs(10); + table! { subgraphs.copy_state(dst) { // deployment_schemas.id @@ -89,7 +104,7 @@ table! { } } -#[derive(Copy, Clone, PartialEq)] +#[derive(Copy, Clone, PartialEq, Eq)] pub enum Status { Finished, Cancelled, @@ -202,17 +217,17 @@ impl CopyState { }) }) .collect::>()?; - tables.sort_by_key(|table| table.dst.object.to_string()); + tables.sort_by_key(|table| table.batch.dst.object.to_string()); let values = tables .iter() .map(|table| { ( - cts::entity_type.eq(table.dst.object.as_str()), + cts::entity_type.eq(table.batch.dst.object.as_str()), cts::dst.eq(dst.site.id), - cts::next_vid.eq(table.next_vid), - cts::target_vid.eq(table.target_vid), - cts::batch_size.eq(table.batch_size), + cts::next_vid.eq(table.batch.next_vid), + cts::target_vid.eq(table.batch.target_vid), + cts::batch_size.eq(table.batch.batch_size.size), ) }) .collect::>(); @@ -268,14 +283,109 @@ impl CopyState { } } -struct TableState { - dst_site: Arc, +/// Track the desired size of a batch in such a way that doing the next +/// batch gets close to TARGET_DURATION for the time it takes to copy one +/// batch, but don't step up the size by more than 2x at once +#[derive(Debug, Queryable)] +pub(crate) struct AdaptiveBatchSize { + pub size: i64, +} + +impl AdaptiveBatchSize { + pub fn new(table: &Table) -> Self { + let size = if table.columns.iter().any(|col| col.is_list()) { + INITIAL_BATCH_SIZE_LIST + } else { + INITIAL_BATCH_SIZE + }; + + Self { size } + } + + // adjust batch size by trying to extrapolate in such a way that we + // get close to TARGET_DURATION for the time it takes to copy one + // batch, but don't step up batch_size by more than 2x at once + pub fn adapt(&mut self, duration: Duration) { + // Avoid division by zero + let duration = duration.as_millis().max(1); + let new_batch_size = self.size as f64 + * ENV_VARS.store.batch_target_duration.as_millis() as f64 + / duration as f64; + self.size = (2 * self.size).min(new_batch_size.round() as i64); + } +} + +impl ToSql for AdaptiveBatchSize { + fn to_sql(&self, out: &mut Output) -> diesel::serialize::Result { + >::to_sql(&self.size, out) + } +} + +impl FromSql for AdaptiveBatchSize { + fn from_sql(bytes: Option<&[u8]>) -> diesel::deserialize::Result { + let size = >::from_sql(bytes)?; + Ok(AdaptiveBatchSize { size }) + } +} + +/// A helper to copy entities from one table to another in batches that are +/// small enough to not interfere with the rest of the operations happening +/// in the database. The `src` and `dst` table must have the same structure +/// so that we can copy rows from one to the other with very little +/// transformation. See `CopyEntityBatchQuery` for the details of what +/// exactly that means +pub(crate) struct BatchCopy { src: Arc
<Table>, dst: Arc<Table>
, /// The `vid` of the next entity version that we will copy next_vid: i64, + /// The last `vid` that should be copied target_vid: i64, - batch_size: i64, + batch_size: AdaptiveBatchSize, +} + +impl BatchCopy { + pub fn new(src: Arc
<Table>, dst: Arc<Table>
, first_vid: i64, last_vid: i64) -> Self { + let batch_size = AdaptiveBatchSize::new(&dst); + + Self { + src, + dst, + next_vid: first_vid, + target_vid: last_vid, + batch_size, + } + } + + /// Copy one batch of entities and update internal state so that the + /// next call to `run` will copy the next batch + pub fn run(&mut self, conn: &PgConnection) -> Result { + let start = Instant::now(); + + // Copy all versions with next_vid <= vid <= next_vid + batch_size - 1, + // but do not go over target_vid + let last_vid = (self.next_vid + self.batch_size.size - 1).min(self.target_vid); + rq::CopyEntityBatchQuery::new(self.dst.as_ref(), &self.src, self.next_vid, last_vid)? + .execute(conn)?; + + let duration = start.elapsed(); + + // remember how far we got + self.next_vid = last_vid + 1; + + self.batch_size.adapt(duration); + + Ok(duration) + } + + pub fn finished(&self) -> bool { + self.next_vid > self.target_vid + } +} + +struct TableState { + batch: BatchCopy, + dst_site: Arc, duration_ms: i64, } @@ -309,25 +419,15 @@ impl TableState { .map(|v| v.max_vid) .unwrap_or(-1); - let batch_size = if dst.columns.iter().any(|col| col.is_list()) { - INITIAL_BATCH_SIZE_LIST - } else { - INITIAL_BATCH_SIZE - }; - Ok(Self { + batch: BatchCopy::new(src, dst, 0, target_vid), dst_site, - src, - dst, - next_vid: 0, - target_vid, - batch_size, duration_ms: 0, }) } fn finished(&self) -> bool { - self.next_vid > self.target_vid + self.batch.finished() } fn load( @@ -373,7 +473,7 @@ impl TableState { .load::<(i32, String, i64, i64, i64, i64)>(conn)? .into_iter() .map( - |(id, entity_type, current_vid, target_vid, batch_size, duration_ms)| { + |(id, entity_type, current_vid, target_vid, size, duration_ms)| { let entity_type = EntityType::new(entity_type); let src = resolve_entity(src_layout, "source", &entity_type, dst_layout.site.id, id); @@ -385,15 +485,18 @@ impl TableState { id, ); match (src, dst) { - (Ok(src), Ok(dst)) => Ok(TableState { - dst_site: dst_layout.site.clone(), - src, - dst, - next_vid: current_vid, - target_vid, - batch_size, - duration_ms, - }), + (Ok(src), Ok(dst)) => { + let mut batch = BatchCopy::new(src, dst, current_vid, target_vid); + let batch_size = AdaptiveBatchSize { size }; + + batch.batch_size = batch_size; + + Ok(TableState { + batch, + dst_site: dst_layout.site.clone(), + duration_ms, + }) + } (Err(e), _) => Err(e), (_, Err(e)) => Err(e), } @@ -420,20 +523,20 @@ impl TableState { update( cts::table .filter(cts::dst.eq(self.dst_site.id)) - .filter(cts::entity_type.eq(self.dst.object.as_str())), + .filter(cts::entity_type.eq(self.batch.dst.object.as_str())), ) .set(cts::started_at.eq(sql("now()"))) .execute(conn)?; } let values = ( - cts::next_vid.eq(self.next_vid), - cts::batch_size.eq(self.batch_size), + cts::next_vid.eq(self.batch.next_vid), + cts::batch_size.eq(self.batch.batch_size.size), cts::duration_ms.eq(self.duration_ms), ); update( cts::table .filter(cts::dst.eq(self.dst_site.id)) - .filter(cts::entity_type.eq(self.dst.object.as_str())), + .filter(cts::entity_type.eq(self.batch.dst.object.as_str())), ) .set(values) .execute(conn)?; @@ -446,7 +549,7 @@ impl TableState { update( cts::table .filter(cts::dst.eq(self.dst_site.id)) - .filter(cts::entity_type.eq(self.dst.object.as_str())), + .filter(cts::entity_type.eq(self.batch.dst.object.as_str())), ) .set(cts::finished_at.eq(sql("now()"))) .execute(conn)?; @@ -472,26 +575,9 @@ impl TableState { } fn copy_batch(&mut self, conn: &PgConnection) -> Result { - let start = Instant::now(); - - // Copy all versions with next_vid 
<= vid <= next_vid + batch_size - 1, - // but do not go over target_vid - let first_batch = self.next_vid == 0; - let last_vid = (self.next_vid + self.batch_size - 1).min(self.target_vid); - rq::CopyEntityBatchQuery::new(self.dst.as_ref(), &self.src, self.next_vid, last_vid)? - .execute(conn)?; - - let duration = start.elapsed(); + let first_batch = self.batch.next_vid == 0; - // remember how far we got - self.next_vid = last_vid + 1; - - // adjust batch size by trying to extrapolate in such a way that we - // get close to TARGET_DURATION for the time it takes to copy one - // batch, but don't step up batch_size by more than 2x at once - let new_batch_size = self.batch_size as f64 * TARGET_DURATION.as_millis() as f64 - / duration.as_millis() as f64; - self.batch_size = (2 * self.batch_size).min(new_batch_size.round() as i64); + let duration = self.batch.run(conn)?; self.record_progress(conn, duration, first_batch)?; @@ -515,7 +601,11 @@ struct CopyProgress<'a> { impl<'a> CopyProgress<'a> { fn new(logger: &'a Logger, state: &CopyState) -> Self { - let target_vid: i64 = state.tables.iter().map(|table| table.target_vid).sum(); + let target_vid: i64 = state + .tables + .iter() + .map(|table| table.batch.target_vid) + .sum(); Self { logger, last_log: Instant::now(), @@ -547,23 +637,23 @@ impl<'a> CopyProgress<'a> { } } - fn update(&mut self, table: &TableState) { + fn update(&mut self, batch: &BatchCopy) { if self.last_log.elapsed() > LOG_INTERVAL { info!( self.logger, "Copied {:.2}% of `{}` entities ({}/{} entity versions), {:.2}% of overall data", - Self::progress_pct(table.next_vid, table.target_vid), - table.dst.object, - table.next_vid, - table.target_vid, - Self::progress_pct(self.current_vid + table.next_vid, self.target_vid) + Self::progress_pct(batch.next_vid, batch.target_vid), + batch.dst.object, + batch.next_vid, + batch.target_vid, + Self::progress_pct(self.current_vid + batch.next_vid, self.target_vid) ); self.last_log = Instant::now(); } } - fn table_finished(&mut self, table: &TableState) { - self.current_vid += table.next_vid; + fn table_finished(&mut self, batch: &BatchCopy) { + self.current_vid += batch.next_vid; } fn finished(&self) { @@ -583,6 +673,8 @@ pub struct Connection { src: Arc, dst: Arc, target_block: BlockPtr, + src_manifest_idx_and_name: Vec<(i32, String)>, + dst_manifest_idx_and_name: Vec<(i32, String)>, } impl Connection { @@ -598,6 +690,8 @@ impl Connection { src: Arc, dst: Arc, target_block: BlockPtr, + src_manifest_idx_and_name: Vec<(i32, String)>, + dst_manifest_idx_and_name: Vec<(i32, String)>, ) -> Result { let logger = logger.new(o!("dst" => dst.site.namespace.to_string())); @@ -623,6 +717,8 @@ impl Connection { src, dst, target_block, + src_manifest_idx_and_name, + dst_manifest_idx_and_name, }) } @@ -639,6 +735,8 @@ impl Connection { &self.conn, &DataSourcesTable::new(state.dst.site.namespace.clone()), state.target_block.number, + &self.src_manifest_idx_and_name, + &self.dst_manifest_idx_and_name, )?; } Ok(()) @@ -665,13 +763,31 @@ impl Connection { if table.is_cancelled(&self.conn)? 
{ return Ok(Status::Cancelled); } + + // Pause copying if replication is lagging behind to avoid + // overloading replicas + let mut lag = catalog::replication_lag(&self.conn)?; + if lag > MAX_REPLICATION_LAG { + loop { + info!(&self.logger, + "Replicas are lagging too much; pausing copying for {}s to allow them to catch up", + REPLICATION_SLEEP.as_secs(); + "lag_s" => lag.as_secs()); + std::thread::sleep(REPLICATION_SLEEP); + lag = catalog::replication_lag(&self.conn)?; + if lag <= ACCEPTABLE_REPLICATION_LAG { + break; + } + } + } + let status = self.transaction(|conn| table.copy_batch(conn))?; if status == Status::Cancelled { return Ok(status); } - progress.update(table); + progress.update(&table.batch); } - progress.table_finished(table); + progress.table_finished(&table.batch); } self.copy_private_data_sources(&state)?; diff --git a/store/postgres/src/deployment.rs b/store/postgres/src/deployment.rs index 68435c536a2..32b7b68c41f 100644 --- a/store/postgres/src/deployment.rs +++ b/store/postgres/src/deployment.rs @@ -1,7 +1,7 @@ //! Utilities for dealing with deployment metadata. Any connection passed //! into these methods must be for the shard that holds the actual //! deployment data and metadata -use crate::{detail::GraphNodeVersion, primary::DeploymentId}; +use crate::{advisory_lock, detail::GraphNodeVersion, primary::DeploymentId}; use diesel::{ connection::SimpleConnection, dsl::{count, delete, insert_into, select, sql, update}, @@ -13,17 +13,23 @@ use diesel::{ sql_query, sql_types::{Nullable, Text}, }; -use graph::data::subgraph::{ - schema::{DeploymentCreate, SubgraphManifestEntity}, - SubgraphFeature, +use graph::{blockchain::block_stream::FirehoseCursor, data::subgraph::schema::SubgraphError}; +use graph::{ + components::store::EntityType, + prelude::{ + anyhow, bigdecimal::ToPrimitive, hex, web3::types::H256, BigDecimal, BlockNumber, BlockPtr, + DeploymentHash, DeploymentState, Schema, StoreError, + }, }; -use graph::prelude::{ - anyhow, bigdecimal::ToPrimitive, hex, web3::types::H256, BigDecimal, BlockNumber, BlockPtr, - DeploymentHash, DeploymentState, Schema, StoreError, +use graph::{ + data::subgraph::{ + schema::{DeploymentCreate, SubgraphManifestEntity}, + SubgraphFeature, + }, + util::backoff::ExponentialBackoff, }; -use graph::{blockchain::block_stream::FirehoseCursor, data::subgraph::schema::SubgraphError}; use stable_hash_legacy::crypto::SetHasher; -use std::{collections::BTreeSet, convert::TryFrom, ops::Bound}; +use std::{collections::BTreeSet, convert::TryFrom, ops::Bound, time::Duration}; use std::{str::FromStr, sync::Arc}; use crate::connection_pool::ForeignServer; @@ -67,9 +73,6 @@ table! { synced -> Bool, fatal_error -> Nullable, non_fatal_errors -> Array, - // Not used anymore; only written to keep backwards compatible - earliest_ethereum_block_hash -> Nullable, - earliest_ethereum_block_number -> Nullable, earliest_block_number -> Integer, latest_ethereum_block_hash -> Nullable, latest_ethereum_block_number -> Nullable, @@ -113,6 +116,11 @@ table! { /// Parent of the smallest start block from the manifest start_block_number -> Nullable, start_block_hash -> Nullable, + raw_yaml -> Nullable, + + // Entity types that have a `causality_region` column. + // Names stored as present in the schema, not in snake case. 
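The pause loop in `copy_table` above is deliberately hysteretic: copying stops once lag exceeds `MAX_REPLICATION_LAG` (60s) but resumes only once it drops to `ACCEPTABLE_REPLICATION_LAG` (30s), so lag hovering around a single threshold cannot make copying flap on and off between batches. A worked standalone model:

```rust
use std::time::Duration;

const MAX_REPLICATION_LAG: Duration = Duration::from_secs(60);
const ACCEPTABLE_REPLICATION_LAG: Duration = Duration::from_secs(30);

/// Returns how many times we polled while paused. In the real code each
/// poll is preceded by a `REPLICATION_SLEEP` (10s) `thread::sleep`.
fn polls_until_resume(mut poll: impl FnMut() -> Duration) -> u32 {
    let mut polls = 0;
    if poll() > MAX_REPLICATION_LAG {
        loop {
            polls += 1;
            if poll() <= ACCEPTABLE_REPLICATION_LAG {
                break;
            }
        }
    }
    polls
}

fn main() {
    // Lag starts at 75s and decays by 10s per poll:
    // 75 (pause), then 65, 55, 45, 35, 25 (resume).
    let mut lag: u64 = 75;
    let polls = polls_until_resume(move || {
        let current = Duration::from_secs(lag);
        lag = lag.saturating_sub(10);
        current
    });
    // Copying resumed only once lag fell to 25s; it did not resume at
    // 55s even though that is already below the 60s pause threshold.
    assert_eq!(polls, 5);
}
```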
+ entities_with_causality_region -> Array, } } @@ -256,7 +264,7 @@ pub fn manifest_info( .filter(sm::id.eq(site.id)) .first(conn)?; Schema::parse(s.as_str(), site.deployment.clone()) - .map_err(|e| StoreError::Unknown(e)) + .map_err(StoreError::Unknown) .map(|schema| (schema, description, repository, spec_version)) } @@ -275,6 +283,22 @@ pub fn features(conn: &PgConnection, site: &Site) -> Result Result<(), StoreError> { + use subgraph_manifest as sm; + + update(sm::table.filter(sm::id.eq(site.id))) + .filter(sm::raw_yaml.is_null()) + .set(sm::raw_yaml.eq(raw_yaml)) + .execute(conn) + .map(|_| ()) + .map_err(|e| e.into()) +} + pub fn transact_block( conn: &PgConnection, site: &Site, @@ -320,7 +344,7 @@ pub fn transact_block( // No matching rows were found. This is an error. By the filter conditions, this can only be // due to a missing deployment (which `block_ptr` catches) or duplicate block processing. - 0 => match block_ptr(&conn, &site.deployment)? { + 0 => match block_ptr(conn, &site.deployment)? { Some(block_ptr_from) if block_ptr_from.number >= ptr.number => Err( StoreError::DuplicateBlockProcessing(site.deployment.clone(), ptr.number), ), @@ -453,11 +477,15 @@ pub fn initialize_block_ptr(conn: &PgConnection, site: &Site) -> Result<(), Stor let needs_init = d::table .filter(d::id.eq(site.id)) - .filter(d::latest_ethereum_block_hash.is_null()) - .select(d::id) - .first::(conn) - .optional()? - .is_some(); + .select(d::latest_ethereum_block_hash) + .first::>>(conn) + .map_err(|e| { + constraint_violation!( + "deployment sgd{} must have been created before calling initialize_block_ptr but we got {}", + site.id, e + ) + })? + .is_none(); if needs_init { if let (Some(hash), Some(number)) = m::table @@ -748,12 +776,26 @@ pub(crate) fn health(conn: &PgConnection, id: DeploymentId) -> Result Result, StoreError> { + use subgraph_manifest as sm; + + sm::table + .filter(sm::id.eq(id)) + .select(sm::entities_with_causality_region) + .get_result(conn) + .map_err(|e| e.into()) +} + /// Reverts the errors and updates the subgraph health if necessary. pub(crate) fn revert_subgraph_errors( conn: &PgConnection, id: &DeploymentHash, reverted_block: BlockNumber, ) -> Result<(), StoreError> { + use subgraph_deployment as d; use subgraph_error as e; let lower_geq = format!("lower({}) >= ", BLOCK_RANGE_COLUMN); @@ -767,7 +809,21 @@ pub(crate) fn revert_subgraph_errors( // The result will be the same at `reverted_block` or `reverted_block - 1` since the errors at // `reverted_block` were just deleted, but semantically we care about `reverted_block - 1` which // is the block being reverted to. - check_health(conn, id, reverted_block - 1) + check_health(conn, id, reverted_block - 1)?; + + // If the deployment is failed in both `failed` and `status` columns, + // update both values respectively to `false` and `healthy`. Basically + // unfail the statuses. 
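As a sketch, the Diesel update that follows is roughly equivalent to this hand-written statement; the table name and enum spellings here are assumptions about the metadata schema, not taken from this diff:

```rust
use diesel::pg::PgConnection;
use diesel::prelude::*;
use diesel::sql_query;
use diesel::sql_types::Text;

/// Reset `failed`/`health` for a deployment that is marked failed in
/// both columns, leaving healthy or merely unhealthy deployments alone.
fn unfail(conn: &PgConnection, deployment: &str) -> QueryResult<usize> {
    sql_query(
        "update subgraphs.subgraph_deployment \
            set failed = false, health = 'healthy' \
          where deployment = $1 \
            and failed = true \
            and health = 'failed'",
    )
    .bind::<Text, _>(deployment)
    .execute(conn)
}
```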
+ update( + d::table + .filter(d::deployment.eq(id.as_str())) + .filter(d::failed.eq(true)) + .filter(d::health.eq(SubgraphHealth::Failed)), + ) + .set((d::failed.eq(false), d::health.eq(SubgraphHealth::Healthy))) + .execute(conn) + .map(|_| ()) + .map_err(StoreError::from) } pub(crate) fn delete_error(conn: &PgConnection, error_id: &str) -> Result<(), StoreError> { @@ -889,13 +945,16 @@ pub fn create_deployment( repository, features, schema, + raw_yaml, + entities_with_causality_region, }, - earliest_block, + start_block, graft_base, graft_block, debug_fork, } = deployment; - let earliest_block_number = earliest_block.as_ref().map(|ptr| ptr.number).unwrap_or(0); + let earliest_block_number = start_block.as_ref().map(|ptr| ptr.number).unwrap_or(0); + let entities_with_causality_region = Vec::from_iter(entities_with_causality_region.into_iter()); let deployment_values = ( d::id.eq(site.id), @@ -905,8 +964,6 @@ pub fn create_deployment( d::health.eq(SubgraphHealth::Healthy), d::fatal_error.eq::>(None), d::non_fatal_errors.eq::>(vec![]), - d::earliest_ethereum_block_hash.eq(b(&earliest_block)), - d::earliest_ethereum_block_number.eq(n(&earliest_block)), d::earliest_block_number.eq(earliest_block_number), d::latest_ethereum_block_hash.eq(sql("null")), d::latest_ethereum_block_number.eq(sql("null")), @@ -930,8 +987,10 @@ pub fn create_deployment( // New subgraphs index only a prefix of bytea columns // see: attr-bytea-prefix m::use_bytea_prefix.eq(true), - m::start_block_hash.eq(b(&earliest_block)), - m::start_block_number.eq(earliest_block_number), + m::start_block_hash.eq(b(&start_block)), + m::start_block_number.eq(start_block.as_ref().map(|ptr| ptr.number)), + m::raw_yaml.eq(raw_yaml), + m::entities_with_causality_region.eq(entities_with_causality_region), ); if exists && replace { @@ -1010,3 +1069,34 @@ pub fn set_entity_count( .execute(conn)?; Ok(()) } + +pub fn set_earliest_block( + conn: &PgConnection, + site: &Site, + earliest_block: BlockNumber, +) -> Result<(), StoreError> { + use subgraph_deployment as d; + + update(d::table.filter(d::id.eq(site.id))) + .set(d::earliest_block_number.eq(earliest_block)) + .execute(conn)?; + Ok(()) +} + +/// Lock the deployment `site` for writes while `f` is running. The lock can +/// cross transactions, and `f` can therefore execute multiple transactions +/// while other write activity for that deployment is locked out. Block the +/// current thread until we can acquire the lock. +// see also: deployment-lock-for-update +pub fn with_lock(conn: &PgConnection, site: &Site, f: F) -> Result +where + F: FnOnce() -> Result, +{ + let mut backoff = ExponentialBackoff::new(Duration::from_millis(100), Duration::from_secs(15)); + while !advisory_lock::lock_deployment_session(conn, site)? 
{ + backoff.sleep(); + } + let res = f(); + advisory_lock::unlock_deployment_session(conn, site)?; + res +} diff --git a/store/postgres/src/deployment_store.rs b/store/postgres/src/deployment_store.rs index 9f21873674e..45b2b9aaef7 100644 --- a/store/postgres/src/deployment_store.rs +++ b/store/postgres/src/deployment_store.rs @@ -3,11 +3,13 @@ use diesel::connection::SimpleConnection; use diesel::pg::PgConnection; use diesel::prelude::*; use diesel::r2d2::{ConnectionManager, PooledConnection}; +use graph::anyhow::Context; use graph::blockchain::block_stream::FirehoseCursor; -use graph::components::store::{EntityKey, EntityType, StoredDynamicDataSource}; +use graph::components::store::{EntityKey, EntityType, PruneReporter, StoredDynamicDataSource}; use graph::components::versions::VERSIONS; use graph::data::query::Trace; use graph::data::subgraph::{status, SPEC_VERSION_0_0_6}; +use graph::data_source::CausalityRegion; use graph::prelude::{ tokio, ApiVersion, CancelHandle, CancelToken, CancelableError, EntityOperation, PoolWaitStats, SubgraphDeploymentEntity, @@ -38,11 +40,12 @@ use graph::prelude::{ use graph_graphql::prelude::api_schema; use web3::types::Address; -use crate::block_range::block_number; +use crate::block_range::{block_number, BLOCK_COLUMN, BLOCK_RANGE_COLUMN}; use crate::catalog; use crate::deployment; use crate::detail::ErrorDetail; use crate::dynds::DataSourcesTable; +use crate::relational::index::{CreateIndex, Method}; use crate::relational::{Layout, LayoutCache, SqlName, Table}; use crate::relational_queries::FromEntityData; use crate::{connection_pool::ConnectionPool, detail}; @@ -132,7 +135,7 @@ impl DeploymentStore { let mut replica_order: Vec<_> = pool_weights .iter() .enumerate() - .map(|(i, weight)| { + .flat_map(|(i, weight)| { let replica = if i == 0 { ReplicaId::Main } else { @@ -140,7 +143,6 @@ impl DeploymentStore { }; vec![replica; *weight] }) - .flatten() .collect(); let mut rng = thread_rng(); replica_order.shuffle(&mut rng); @@ -173,6 +175,8 @@ impl DeploymentStore { let exists = deployment::exists(&conn, &site)?; // Create (or update) the metadata. Update only happens in tests + let entities_with_causality_region = + deployment.manifest.entities_with_causality_region.clone(); if replace || !exists { deployment::create_deployment(&conn, &site, deployment, exists, replace)?; }; @@ -182,7 +186,12 @@ impl DeploymentStore { let query = format!("create schema {}", &site.namespace); conn.batch_execute(&query)?; - let layout = Layout::create_relational_schema(&conn, site.clone(), schema)?; + let layout = Layout::create_relational_schema( + &conn, + site.clone(), + schema, + entities_with_causality_region.into_iter().collect(), + )?; // See if we are grafting and check that the graft is permissible if let Some(base) = graft_base { let errors = layout.can_copy_from(&base); @@ -211,7 +220,8 @@ impl DeploymentStore { site: &Site, ) -> Result { let conn = self.get_conn()?; - detail::deployment_entity(&conn, site) + Ok(detail::deployment_entity(&conn, site) + .with_context(|| format!("Deployment details not found for {}", site.deployment))?) } // Remove the data and metadata for the deployment `site`. 
This operation @@ -235,17 +245,11 @@ impl DeploymentStore { ) -> Result<(Vec, Trace), QueryExecutionError> { let layout = self.layout(conn, site)?; - let logger = query.logger.unwrap_or_else(|| self.logger.clone()); - layout.query( - &logger, - conn, - query.collection, - query.filter, - query.order, - query.range, - query.block, - query.query_id, - ) + let logger = query + .logger + .cheap_clone() + .unwrap_or_else(|| self.logger.cheap_clone()); + layout.query(&logger, conn, query) } fn check_interface_entity_uniqueness( @@ -277,8 +281,7 @@ impl DeploymentStore { .interfaces_for_type(&key.entity_type) .into_iter() .flatten() - .map(|interface| &types_for_interface[&interface.into()]) - .flatten() + .flat_map(|interface| &types_for_interface[&EntityType::from(interface)]) .map(EntityType::from) .filter(|type_name| type_name != &key.entity_type), ); @@ -312,7 +315,7 @@ impl DeploymentStore { let mut inserts = HashMap::new(); let mut overwrites = HashMap::new(); let mut removals = HashMap::new(); - for modification in mods.into_iter() { + for modification in mods.iter() { match modification { Insert { key, data } => { inserts @@ -413,9 +416,7 @@ impl DeploymentStore { stopwatch: &StopwatchMetrics, ) -> Result { let _section = stopwatch.start_section("apply_entity_modifications_delete"); - layout - .delete(conn, entity_type, entity_keys, block_number(ptr), stopwatch) - .map_err(|_error| anyhow!("Failed to remove entities: {:?}", entity_keys).into()) + layout.delete(conn, entity_type, entity_keys, block_number(ptr), stopwatch) } /// Execute a closure with a connection to the database. @@ -682,9 +683,54 @@ impl DeploymentStore { } /// Runs the SQL `ANALYZE` command in a table. - pub(crate) fn analyze(&self, site: Arc, entity_name: &str) -> Result<(), StoreError> { + pub(crate) fn analyze(&self, site: Arc, entity: Option<&str>) -> Result<(), StoreError> { + let conn = self.get_conn()?; + let layout = self.layout(&conn, site)?; + let tables = entity + .map(|entity| resolve_table_name(&layout, &entity)) + .transpose()? + .map(|table| vec![table]) + .unwrap_or_else(|| layout.tables.values().map(Arc::as_ref).collect()); + for table in tables { + table.analyze(&conn)?; + } + Ok(()) + } + + pub(crate) fn stats_targets( + &self, + site: Arc, + ) -> Result<(i32, BTreeMap>), StoreError> { + let conn = self.get_conn()?; + let default = catalog::default_stats_target(&conn)?; + let targets = catalog::stats_targets(&conn, &site.namespace)?; + + Ok((default, targets)) + } + + pub(crate) fn set_stats_target( + &self, + site: Arc, + entity: Option<&str>, + columns: Vec, + target: i32, + ) -> Result<(), StoreError> { let conn = self.get_conn()?; - self.analyze_with_conn(site, entity_name, &conn) + let layout = self.layout(&conn, site.clone())?; + + let tables = entity + .map(|entity| resolve_table_name(&layout, &entity)) + .transpose()? + .map(|table| vec![table]) + .unwrap_or_else(|| layout.tables.values().map(Arc::as_ref).collect()); + + conn.transaction(|| { + for table in tables { + let columns = resolve_column_names(table, &columns)?; + catalog::set_stats_target(&conn, &site.namespace, &table.name, &columns, target)?; + } + Ok(()) + }) } /// Runs the SQL `ANALYZE` command in a table, with a shared connection. 
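The `set_stats_target` path above folds all requested columns into a single `ALTER TABLE`. A worked sketch of the statement `catalog::set_stats_target` assembles (the names here are hypothetical, and the real code quotes identifiers via `SqlName::quoted`):

```rust
/// Build one ALTER TABLE carrying an "alter column ... set statistics"
/// clause per requested column.
fn set_stats_sql(namespace: &str, table: &str, columns: &[&str], target: i32) -> String {
    let clauses = columns
        .iter()
        .map(|c| format!("alter column \"{c}\" set statistics {target}"))
        .collect::<Vec<_>>()
        .join(", ");
    format!("alter table {namespace}.\"{table}\" {clauses}")
}

fn main() {
    assert_eq!(
        set_stats_sql("sgd42", "swap", &["id", "pair"], 500),
        r#"alter table sgd42."swap" alter column "id" set statistics 500, alter column "pair" set statistics 500"#
    );
}
```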
@@ -696,12 +742,9 @@ impl DeploymentStore { ) -> Result<(), StoreError> { let store = self.clone(); let entity_name = entity_name.to_owned(); - let layout = store.layout(&conn, site)?; + let layout = store.layout(conn, site)?; let table = resolve_table_name(&layout, &entity_name)?; - let table_name = &table.qualified_name; - let sql = format!("analyze {table_name}"); - conn.execute(&sql)?; - Ok(()) + table.analyze(conn) } /// Creates a new index in the specified Entity table if it doesn't already exist. @@ -712,7 +755,7 @@ impl DeploymentStore { site: Arc, entity_name: &str, field_names: Vec, - index_method: String, + index_method: Method, ) -> Result<(), StoreError> { let store = self.clone(); let entity_name = entity_name.to_owned(); @@ -754,7 +797,7 @@ impl DeploymentStore { &self, site: Arc, entity_name: &str, - ) -> Result, StoreError> { + ) -> Result, StoreError> { let store = self.clone(); let entity_name = entity_name.to_owned(); self.with_conn(move |conn, _| { @@ -762,8 +805,13 @@ impl DeploymentStore { let layout = store.layout(conn, site)?; let table = resolve_table_name(&layout, &entity_name)?; let table_name = &table.name; - catalog::indexes_for_table(conn, schema_name.as_str(), table_name.as_str()) - .map_err(Into::into) + let indexes = + catalog::indexes_for_table(conn, schema_name.as_str(), table_name.as_str()) + .map_err(StoreError::from)?; + Ok(indexes + .into_iter() + .map(|defn| CreateIndex::parse(defn)) + .collect()) }) .await } @@ -797,6 +845,61 @@ impl DeploymentStore { }) .await } + + pub(crate) async fn prune( + self: &Arc, + mut reporter: Box, + site: Arc, + earliest_block: BlockNumber, + reorg_threshold: BlockNumber, + prune_ratio: f64, + ) -> Result, StoreError> { + let store = self.clone(); + self.with_conn(move |conn, cancel| { + let layout = store.layout(conn, site.clone())?; + cancel.check_cancel()?; + let state = deployment::state(conn, site.deployment.clone())?; + + if state.latest_block.number <= reorg_threshold { + return Ok(reporter); + } + + if state.earliest_block_number > earliest_block { + return Err(constraint_violation!("earliest block can not move back from {} to {}", state.earliest_block_number, earliest_block).into()); + } + + let final_block = state.latest_block.number - reorg_threshold; + if final_block <= earliest_block { + return Err(constraint_violation!("the earliest block {} must be at least {} blocks before the current latest block {}", earliest_block, reorg_threshold, state.latest_block.number).into()); + } + + if let Some((_, graft)) = deployment::graft_point(conn, &site.deployment)? 
{ + if graft.block_number() >= earliest_block { + return Err(constraint_violation!("the earliest block {} must be after the graft point {}", earliest_block, graft.block_number()).into()); + } + } + + cancel.check_cancel()?; + + conn.transaction(|| { + deployment::set_earliest_block(conn, site.as_ref(), earliest_block) + })?; + + cancel.check_cancel()?; + + layout.prune_by_copying( + &store.logger, + reporter.as_mut(), + conn, + earliest_block, + final_block, + prune_ratio, + cancel, + )?; + Ok(reporter) + }) + .await + } } /// Methods that back the trait `graph::components::Store`, but have small @@ -808,7 +911,7 @@ impl DeploymentStore { self.with_conn(|conn, cancel| { cancel.check_cancel()?; - Self::block_ptr_with_conn(&conn, site).map_err(Into::into) + Self::block_ptr_with_conn(conn, site).map_err(Into::into) }) .await } @@ -819,7 +922,7 @@ impl DeploymentStore { self.with_conn(|conn, cancel| { cancel.check_cancel()?; - deployment::get_subgraph_firehose_cursor(&conn, site) + deployment::get_subgraph_firehose_cursor(conn, site) .map(FirehoseCursor::from) .map_err(Into::into) }) @@ -926,12 +1029,8 @@ impl DeploymentStore { let info = self.subgraph_info(&site5).map_err(anyhow::Error::from)?; - let mut finisher = ProofOfIndexingFinisher::new( - &block2, - &site3.deployment, - &indexer, - info.poi_version.clone(), - ); + let mut finisher = + ProofOfIndexingFinisher::new(&block2, &site3.deployment, &indexer, info.poi_version); for (name, region) in by_causality_region.drain() { finisher.add_causality_region(&name, ®ion); } @@ -949,17 +1048,17 @@ impl DeploymentStore { ) -> Result, StoreError> { let conn = self.get_conn()?; let layout = self.layout(&conn, site)?; - layout.find(&conn, &key.entity_type, &key.entity_id, block) + layout.find(&conn, &key, block) } - /// Retrieve all the entities matching `ids_for_type` from the - /// deployment `site`. Only consider entities as of the given `block` + /// Retrieve all the entities matching `ids_for_type`, both the type and causality region, from + /// the deployment `site`. Only consider entities as of the given `block` pub(crate) fn get_many( &self, site: Arc, - ids_for_type: &BTreeMap<&EntityType, Vec<&str>>, + ids_for_type: &BTreeMap<(EntityType, CausalityRegion), Vec>, block: BlockNumber, - ) -> Result>, StoreError> { + ) -> Result, StoreError> { if ids_for_type.is_empty() { return Ok(BTreeMap::new()); } @@ -1003,61 +1102,64 @@ impl DeploymentStore { data_sources: &[StoredDynamicDataSource], deterministic_errors: &[SubgraphError], manifest_idx_and_name: &[(u32, String)], - offchain_to_remove: &[StoredDynamicDataSource], + processed_data_sources: &[StoredDynamicDataSource], ) -> Result { let conn = { let _section = stopwatch.start_section("transact_blocks_get_conn"); self.get_conn()? }; - let event = conn.transaction(|| -> Result<_, StoreError> { - // Emit a store event for the changes we are about to make. 
We - // wait with sending it until we have done all our other work - // so that we do not hold a lock on the notification queue - // for longer than we have to - let event: StoreEvent = StoreEvent::from_mods(&site.deployment, mods); - - // Make the changes - let layout = self.layout(&conn, site.clone())?; - let section = stopwatch.start_section("apply_entity_modifications"); - let count = self.apply_entity_modifications( - &conn, - layout.as_ref(), - mods, - block_ptr_to, - stopwatch, - )?; - section.end(); - - dynds::insert( - &conn, - &site, - data_sources, - block_ptr_to, - manifest_idx_and_name, - )?; + let event = deployment::with_lock(&conn, &site, || { + conn.transaction(|| -> Result<_, StoreError> { + // Emit a store event for the changes we are about to make. We + // wait with sending it until we have done all our other work + // so that we do not hold a lock on the notification queue + // for longer than we have to + let event: StoreEvent = StoreEvent::from_mods(&site.deployment, mods); - dynds::remove_offchain(&conn, &site, offchain_to_remove)?; + // Make the changes + let layout = self.layout(&conn, site.clone())?; - if !deterministic_errors.is_empty() { - deployment::insert_subgraph_errors( + let section = stopwatch.start_section("apply_entity_modifications"); + let count = self.apply_entity_modifications( &conn, - &site.deployment, - deterministic_errors, - block_ptr_to.block_number(), + layout.as_ref(), + mods, + block_ptr_to, + stopwatch, )?; - } + section.end(); - deployment::transact_block( - &conn, - &site, - block_ptr_to, - firehose_cursor, - layout.count_query.as_str(), - count, - )?; + dynds::insert( + &conn, + &site, + data_sources, + block_ptr_to, + manifest_idx_and_name, + )?; + + dynds::update_offchain_status(&conn, &site, processed_data_sources)?; + + if !deterministic_errors.is_empty() { + deployment::insert_subgraph_errors( + &conn, + &site.deployment, + deterministic_errors, + block_ptr_to.block_number(), + )?; + } + + deployment::transact_block( + &conn, + &site, + block_ptr_to, + firehose_cursor, + layout.count_query.as_str(), + count, + )?; - Ok(event) + Ok(event) + }) })?; Ok(event) @@ -1070,47 +1172,54 @@ impl DeploymentStore { block_ptr_to: BlockPtr, firehose_cursor: &FirehoseCursor, ) -> Result { - let event = conn.transaction(|| -> Result<_, StoreError> { - // Don't revert past a graft point - let info = self.subgraph_info_with_conn(conn, site.as_ref())?; - if let Some(graft_block) = info.graft_block { - if graft_block > block_ptr_to.number { - return Err(anyhow!( - "Can not revert subgraph `{}` to block {} as it was \ + let event = deployment::with_lock(conn, &site, || { + conn.transaction(|| -> Result<_, StoreError> { + // Don't revert past a graft point + let info = self.subgraph_info_with_conn(conn, site.as_ref())?; + if let Some(graft_block) = info.graft_block { + if graft_block > block_ptr_to.number { + return Err(anyhow!( + "Can not revert subgraph `{}` to block {} as it was \ grafted at block {} and reverting past a graft point \ is not possible", - site.deployment.clone(), - block_ptr_to.number, - graft_block - ) - .into()); + site.deployment.clone(), + block_ptr_to.number, + graft_block + ) + .into()); + } } - } - // The revert functions want the number of the first block that we need to get rid of - let block = block_ptr_to.number + 1; + // The revert functions want the number of the first block that we need to get rid of + let block = block_ptr_to.number + 1; - deployment::revert_block_ptr(conn, &site.deployment, block_ptr_to, 
firehose_cursor)?; + deployment::revert_block_ptr( + conn, + &site.deployment, + block_ptr_to, + firehose_cursor, + )?; - // Revert the data - let layout = self.layout(conn, site.clone())?; + // Revert the data + let layout = self.layout(conn, site.clone())?; - let (event, count) = layout.revert_block(conn, block)?; + let (event, count) = layout.revert_block(conn, block)?; - // Revert the meta data changes that correspond to this subgraph. - // Only certain meta data changes need to be reverted, most - // importantly creation of dynamic data sources. We ensure in the - // rest of the code that we only record history for those meta data - // changes that might need to be reverted - Layout::revert_metadata(&conn, &site, block)?; + // Revert the meta data changes that correspond to this subgraph. + // Only certain meta data changes need to be reverted, most + // importantly creation of dynamic data sources. We ensure in the + // rest of the code that we only record history for those meta data + // changes that might need to be reverted + Layout::revert_metadata(conn, &site, block)?; - deployment::update_entity_count( - conn, - site.as_ref(), - layout.count_query.as_str(), - count, - )?; - Ok(event) + deployment::update_entity_count( + conn, + site.as_ref(), + layout.count_query.as_str(), + count, + )?; + Ok(event) + }) })?; Ok(event) @@ -1172,7 +1281,7 @@ impl DeploymentStore { error: SubgraphError, ) -> Result<(), StoreError> { self.with_conn(move |conn, _| { - conn.transaction(|| deployment::fail(&conn, &id, &error)) + conn.transaction(|| deployment::fail(conn, &id, &error)) .map_err(Into::into) }) .await?; @@ -1208,12 +1317,22 @@ impl DeploymentStore { manifest_idx_and_name: Vec<(u32, String)>, ) -> Result, StoreError> { self.with_conn(move |conn, _| { - conn.transaction(|| crate::dynds::load(&conn, &site, block, manifest_idx_and_name)) + conn.transaction(|| crate::dynds::load(conn, &site, block, manifest_idx_and_name)) .map_err(Into::into) }) .await } + pub(crate) async fn causality_region_curr_val( + &self, + site: Arc, + ) -> Result, StoreError> { + self.with_conn(move |conn, _| { + Ok(conn.transaction(|| crate::dynds::causality_region_curr_val(conn, &site))?) + }) + .await + } + pub(crate) async fn exists_and_synced(&self, id: DeploymentHash) -> Result { self.with_conn(move |conn, _| { conn.transaction(|| deployment::exists_and_synced(conn, &id)) @@ -1244,12 +1363,12 @@ impl DeploymentStore { &self, logger: &Logger, site: Arc, - graft_src: Option<(Arc, BlockPtr)>, + graft_src: Option<(Arc, BlockPtr, SubgraphDeploymentEntity)>, ) -> Result<(), StoreError> { let dst = self.find_layout(site.cheap_clone())?; - // Do any cleanup to bring the subgraph into a known good state - if let Some((src, block)) = graft_src { + // If `graft_src` is `Some`, then there is a pending graft. + if let Some((src, block, src_deployment)) = graft_src { info!( logger, "Initializing graft by copying data from {} to {}", @@ -1257,6 +1376,12 @@ impl DeploymentStore { dst.catalog.site.namespace ); + let src_manifest_idx_and_name = src_deployment.manifest.template_idx_and_name()?; + let dst_manifest_idx_and_name = self + .load_deployment(&dst.site)? 
+ .manifest + .template_idx_and_name()?; + // Copy subgraph data // We allow both not copying tables at all from the source, as well // as adding new tables in `self`; we only need to check that tables @@ -1268,6 +1393,8 @@ impl DeploymentStore { src.clone(), dst.clone(), block.clone(), + src_manifest_idx_and_name, + dst_manifest_idx_and_name, )?; let status = copy_conn.copy_data()?; if status == crate::copy::Status::Cancelled { @@ -1313,6 +1440,12 @@ impl DeploymentStore { info!(logger, "Counted the entities"; "time_ms" => start.elapsed().as_millis()); + deployment::set_earliest_block( + &conn, + &dst.site, + src_deployment.earliest_block_number, + )?; + // Analyze all tables for this deployment for entity_name in dst.tables.keys() { self.analyze_with_conn(site.cheap_clone(), entity_name.as_str(), &conn)?; @@ -1524,10 +1657,21 @@ impl DeploymentStore { &self, site: &Site, ) -> Result { - let id = site.id.clone(); + let id = site.id; self.with_conn(move |conn, _| deployment::health(conn, id).map_err(Into::into)) .await } + + pub(crate) async fn set_manifest_raw_yaml( + &self, + site: Arc, + raw_yaml: String, + ) -> Result<(), StoreError> { + self.with_conn(move |conn, _| { + deployment::set_manifest_raw_yaml(conn, &site, &raw_yaml).map_err(Into::into) + }) + .await + } } /// Tries to fetch a [`Table`] either by its Entity name or its SQL name. @@ -1546,26 +1690,34 @@ fn resolve_table_name<'a>(layout: &'a Layout, name: &'_ str) -> Result<&'a Table }) } -// Resolves column names. -// -// Since we allow our input to be either camel-case or snake-case, we must retry the -// search using the latter if the search for the former fails. +/// Resolves column names against the `table`. The `field_names` can be +/// either GraphQL attributes or the SQL names of columns. We also accept +/// the names `block_range` and `block$` and map that to the correct name +/// for the block range column for that table. 
fn resolve_column_names<'a, T: AsRef<str>>( table: &'a Table, field_names: &[T], -) -> Result<Vec<&'a str>, StoreError> { +) -> Result<Vec<&'a SqlName>, StoreError> { + fn lookup<'a>(table: &'a Table, field: &str) -> Result<&'a SqlName, StoreError> { + table + .column_for_field(field) + .or_else(|_error| { + let sql_name = SqlName::from(field); + table + .column(&sql_name) + .ok_or_else(|| StoreError::UnknownField(field.to_string())) + }) + .map(|column| &column.name) + } + field_names .iter() .map(|f| { - table - .column_for_field(f.as_ref()) - .or_else(|_error| { - let sql_name = SqlName::from(f.as_ref()); - table - .column(&sql_name) - .ok_or_else(|| StoreError::UnknownField(f.as_ref().to_string())) - }) - .map(|column| column.name.as_str()) + if f.as_ref() == BLOCK_RANGE_COLUMN || f.as_ref() == BLOCK_COLUMN { + Ok(table.block_column()) + } else { + lookup(table, f.as_ref()) + } }) .collect() }
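To illustrate the resolution rules against the `Thing` test schema that appears near the end of this diff (a sketch, not code from the change): the GraphQL attribute `bigThing` and its SQL spelling `big_thing` resolve to the same column, while the block pseudo-columns resolve per table mutability:

fn resolve_example(table: &Table) -> Result<(), StoreError> {
    // On a mutable table this yields [big_thing, big_thing, block_range];
    // on an immutable table the last entry would be the `block$` column.
    let columns = resolve_column_names(table, &["bigThing", "big_thing", "block_range"])?;
    assert_eq!(3, columns.len());
    Ok(())
}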
diff --git a/store/postgres/src/detail.rs b/store/postgres/src/detail.rs index 9f9b65c8da5..f67ffbae34a 100644 --- a/store/postgres/src/detail.rs +++ b/store/postgres/src/detail.rs @@ -10,6 +10,7 @@ use diesel::prelude::{ use diesel_derives::Associations; use git_testament::{git_testament, git_testament_macros}; use graph::blockchain::BlockHash; +use graph::components::store::EntityType; use graph::data::subgraph::schema::{SubgraphError, SubgraphManifestEntity}; use graph::prelude::{ bigdecimal::ToPrimitive, BigDecimal, BlockPtr, DeploymentHash, StoreError, @@ -49,10 +50,7 @@ pub struct DeploymentDetail { pub synced: bool, fatal_error: Option, non_fatal_errors: Vec, - // Not used anymore; only written to keep backwards compatible - earliest_ethereum_block_hash: Option, - earliest_ethereum_block_number: Option, - // New tracker for earliest block number + /// The earliest block for which we have history earliest_block_number: i32, pub latest_ethereum_block_hash: Option, pub latest_ethereum_block_number: Option, @@ -340,6 +338,8 @@ struct StoredSubgraphManifest { use_bytea_prefix: bool, start_block_number: Option, start_block_hash: Option, + raw_yaml: Option<String>, + entities_with_causality_region: Vec<EntityType>, } impl From for SubgraphManifestEntity { @@ -350,6 +350,8 @@ impl From for SubgraphManifestEntity { repository: value.repository, features: value.features, schema: value.schema, + raw_yaml: value.raw_yaml, + entities_with_causality_region: value.entities_with_causality_region, } } } @@ -360,13 +362,13 @@ impl TryFrom<StoredDeploymentEntity> for SubgraphDeploymentEntity { type Error = StoreError; fn try_from(ent: StoredDeploymentEntity) -> Result { - let (detail, manifest) = (ent.0, ent.1.into()); + let (detail, manifest) = (ent.0, ent.1); - let earliest_block = block( + let start_block = block( &detail.deployment, - "earliest_block", - detail.earliest_ethereum_block_hash, - detail.earliest_ethereum_block_number, + "start_block", + manifest.start_block_hash.clone(), + manifest.start_block_number.map(|n| n.into()), )? .map(|block| block.to_ptr()); @@ -399,13 +401,14 @@ impl TryFrom<StoredDeploymentEntity> for SubgraphDeploymentEntity { .map_err(|b| constraint_violation!("invalid debug fork `{}`", b))?; Ok(SubgraphDeploymentEntity { - manifest, + manifest: manifest.into(), failed: detail.failed, health: detail.health.into(), synced: detail.synced, fatal_error: None, non_fatal_errors: vec![], - earliest_block, + earliest_block_number: detail.earliest_block_number, + start_block, latest_block, graft_base, graft_block, diff --git a/store/postgres/src/dynds/mod.rs b/store/postgres/src/dynds/mod.rs index 9a352705405..5f6dd273964 100644 --- a/store/postgres/src/dynds/mod.rs +++ b/store/postgres/src/dynds/mod.rs @@ -9,6 +9,7 @@ use graph::{ blockchain::BlockPtr, components::store::StoredDynamicDataSource, constraint_violation, + data_source::CausalityRegion, prelude::{BlockNumber, StoreError}, }; @@ -58,19 +59,34 @@ pub(crate) fn revert( } } -pub(crate) fn remove_offchain( +pub(crate) fn update_offchain_status( conn: &PgConnection, site: &Site, data_sources: &[StoredDynamicDataSource], ) -> Result<(), StoreError> { - if data_sources.len() == 0 { + if data_sources.is_empty() { return Ok(()); } match site.schema_version.private_data_sources() { - true => DataSourcesTable::new(site.namespace.clone()).remove_offchain(conn, data_sources), + true => { + DataSourcesTable::new(site.namespace.clone()).update_offchain_status(conn, data_sources) + } false => Err(constraint_violation!( "shared schema does not support data source offchain_found", )), } } + +/// The maximum assigned causality region. Any higher number is therefore free to be assigned. +pub(crate) fn causality_region_curr_val( + conn: &PgConnection, + site: &Site, +) -> Result<Option<CausalityRegion>, StoreError> { + match site.schema_version.private_data_sources() { + true => DataSourcesTable::new(site.namespace.clone()).causality_region_curr_val(conn), + + // Subgraphs on the legacy shared table do not use offchain data sources.
+ false => Ok(None), + } +} diff --git a/store/postgres/src/dynds/private.rs b/store/postgres/src/dynds/private.rs index 78710db066a..e04c7d303b2 100644 --- a/store/postgres/src/dynds/private.rs +++ b/store/postgres/src/dynds/private.rs @@ -2,14 +2,17 @@ use std::ops::Bound; use diesel::{ pg::types::sql_types, + prelude::*, sql_query, sql_types::{Binary, Integer, Jsonb, Nullable}, PgConnection, QueryDsl, RunQueryDsl, }; use graph::{ + anyhow::Context, components::store::StoredDynamicDataSource, constraint_violation, + data_source::CausalityRegion, prelude::{serde_json, BlockNumber, StoreError}, }; @@ -29,6 +32,7 @@ pub(crate) struct DataSourcesTable { manifest_idx: DynColumn, param: DynColumn>, context: DynColumn>, + done_at: DynColumn>, } impl DataSourcesTable { @@ -47,6 +51,7 @@ impl DataSourcesTable { manifest_idx: table.column("manifest_idx"), param: table.column("param"), context: table.column("context"), + done_at: table.column("done_at"), table, } } @@ -57,15 +62,17 @@ impl DataSourcesTable { create table {nsp}.{table} ( vid integer generated by default as identity primary key, block_range int4range not null, - causality_region integer generated by default as identity, + causality_region integer not null, manifest_idx integer not null, parent integer references {nsp}.{table}, id bytea, param bytea, - context jsonb + context jsonb, + done_at int ); create index gist_block_range_data_sources$ on {nsp}.data_sources$ using gist (block_range); + create index btree_causality_region_data_sources$ on {nsp}.data_sources$ (causality_region); ", nsp = self.namespace.to_string(), table = Self::TABLE_NAME @@ -85,7 +92,8 @@ impl DataSourcesTable { i32, Option>, Option, - i32, + CausalityRegion, + Option, ); let tuples = self .table @@ -97,6 +105,7 @@ impl DataSourcesTable { &self.param, &self.context, &self.causality_region, + &self.done_at, )) .order_by(&self.vid) .load::(conn)?; @@ -104,7 +113,7 @@ impl DataSourcesTable { let mut dses: Vec<_> = tuples .into_iter() .map( - |(block_range, manifest_idx, param, context, causality_region)| { + |(block_range, manifest_idx, param, context, causality_region, done_at)| { let creation_block = match block_range.0 { Bound::Included(block) => Some(block), @@ -114,13 +123,13 @@ impl DataSourcesTable { } }; - let is_offchain = causality_region > 0; StoredDynamicDataSource { manifest_idx: manifest_idx as u32, param: param.map(|p| p.into()), context, creation_block, - is_offchain, + done_at, + causality_region, } }, ) @@ -146,7 +155,8 @@ impl DataSourcesTable { param, context, creation_block, - is_offchain, + done_at, + causality_region, } = ds; if creation_block != &Some(block) { @@ -159,30 +169,21 @@ impl DataSourcesTable { // Offchain data sources have a unique causality region assigned from a sequence in the // database, while onchain data sources always have causality region 0. 
- let query = match is_offchain { - false => format!( - "insert into {}(block_range, manifest_idx, param, context, causality_region) \ - values (int4range($1, null), $2, $3, $4, $5)", - self.qname - ), - - true => format!( - "insert into {}(block_range, manifest_idx, param, context) \ - values (int4range($1, null), $2, $3, $4)", - self.qname - ), - }; + let query = format!( + "insert into {}(block_range, manifest_idx, param, context, causality_region, done_at) \ + values (int4range($1, null), $2, $3, $4, $5, $6)", + self.qname + ); let query = sql_query(query) .bind::, _>(creation_block) .bind::(*manifest_idx as i32) .bind::, _>(param.as_ref().map(|p| &**p)) - .bind::, _>(context); + .bind::, _>(context) + .bind::(causality_region) + .bind::, _>(done_at); - inserted_total += match is_offchain { - false => query.bind::(0).execute(conn)?, - true => query.execute(conn)?, - }; + inserted_total += query.execute(conn)?; } Ok(inserted_total) @@ -206,6 +207,8 @@ impl DataSourcesTable { conn: &PgConnection, dst: &DataSourcesTable, target_block: BlockNumber, + src_manifest_idx_and_name: &[(i32, String)], + dst_manifest_idx_and_name: &[(i32, String)], ) -> Result { // Check if there are any data sources for dst which indicates we already copied let count = dst.table.clone().count().get_result::(conn)?; @@ -213,82 +216,122 @@ impl DataSourcesTable { return Ok(count as usize); } - let query = format!( - "\ - insert into {dst}(block_range, causality_region, manifest_idx, parent, id, param, context) - select case - when upper(e.block_range) <= $1 then e.block_range - else int4range(lower(e.block_range), null) - end, - e.causality_region, e.manifest_idx, e.parent, e.id, e.param, e.context - from {src} e - where lower(e.block_range) <= $1 - ", - src = self.qname, - dst = dst.qname + type Tuple = ( + (Bound, Bound), + i32, + Option>, + Option, + i32, + Option, ); - let count = sql_query(&query) - .bind::(target_block) - .execute(conn)?; + let src_tuples = self + .table + .clone() + .filter(diesel::dsl::sql("lower(block_range) <= ").bind::(target_block)) + .select(( + &self.block_range, + &self.manifest_idx, + &self.param, + &self.context, + &self.causality_region, + &self.done_at, + )) + .order_by(&self.vid) + .load::(conn)?; + + let mut count = 0; + for (block_range, src_manifest_idx, param, context, causality_region, done_at) in src_tuples + { + let name = &src_manifest_idx_and_name + .iter() + .find(|(idx, _)| idx == &src_manifest_idx) + .context("manifest_idx not found in src")? + .1; + let dst_manifest_idx = dst_manifest_idx_and_name + .iter() + .find(|(_, n)| n == name) + .context("name not found in dst")? + .0; - // Test that both tables have the same contents. - debug_assert!( - self.load(conn, target_block).map_err(|e| e.to_string()) - == dst.load(conn, target_block).map_err(|e| e.to_string()) - ); + let query = format!( + "\ + insert into {dst}(block_range, manifest_idx, param, context, causality_region, done_at) + values(case + when upper($2) <= $1 then $2 + else int4range(lower($2), null) + end, + $3, $4, $5, $6, $7) + ", + dst = dst.qname + ); + + count += sql_query(&query) + .bind::(target_block) + .bind::, _>(block_range) + .bind::(dst_manifest_idx) + .bind::, _>(param) + .bind::, _>(context) + .bind::(causality_region) + .bind::, _>(done_at) + .execute(conn)?; + } + + // If the manifest idxes remained constant, we can test that both tables have the same + // contents. 
+ if src_manifest_idx_and_name == dst_manifest_idx_and_name { + debug_assert!( + self.load(conn, target_block).map_err(|e| e.to_string()) + == dst.load(conn, target_block).map_err(|e| e.to_string()) + ); + } Ok(count) } - // Remove offchain data sources by checking for equality. Their range will be set to the empty range. - pub(super) fn remove_offchain( + // Mark offchain data sources as done by checking the causality region, which currently + // uniquely identifies an offchain data source. + pub(super) fn update_offchain_status( &self, conn: &PgConnection, data_sources: &[StoredDynamicDataSource], ) -> Result<(), StoreError> { for ds in data_sources { - let StoredDynamicDataSource { - manifest_idx, - param, - context, - creation_block, - is_offchain, - } = ds; - - if !is_offchain { - return Err(constraint_violation!( - "called remove_offchain with onchain data sources" - )); - } - let query = format!( - "update {} set block_range = 'empty'::int4range \ - where manifest_idx = $1 - and param is not distinct from $2 - and context is not distinct from $3 - and lower(block_range) is not distinct from $4", + "update {} set done_at = $1 where causality_region = $2", self.qname ); let count = sql_query(query) - .bind::(*manifest_idx as i32) - .bind::, _>(param.as_ref().map(|p| &**p)) - .bind::, _>(context) - .bind::, _>(creation_block) + .bind::, _>(ds.done_at) + .bind::(ds.causality_region) .execute(conn)?; if count > 1 { - // Data source deduplication enforces this invariant. - // See also: data-source-is-duplicate-of return Err(constraint_violation!( - "expected to remove at most one offchain data source but would remove {}, ds: {:?}", + "expected to update at most one offchain data source but would update {}, causality region: {}", count, - ds + ds.causality_region )); } } Ok(()) } + + /// The current causality region sequence value according to the store, which is inferred to be + /// the maximum value existing in the table. + pub(super) fn causality_region_curr_val( + &self, + conn: &PgConnection, + ) -> Result<Option<CausalityRegion>, StoreError> { + // Get the maximum `causality_region` leveraging the btree index. + Ok(self + .table + .clone() + .select(&self.causality_region) + .order_by((&self.causality_region).desc()) + .first::<CausalityRegion>(conn) + .optional()?) + } }
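Since the `generated by default as identity` default on `causality_region` is gone from the DDL above, a writer now has to assign region numbers itself, seeded from this stored maximum. A rough sketch of that hand-off; the helper is hypothetical and works on the raw `int` the column stores:

// Sketch: any number above the stored maximum is free to hand out, so a
// writer can continue at `max + 1`. Region 0 is reserved for onchain data
// sources, so an empty table starts offchain regions at 1.
fn next_causality_region(curr_max: Option<i32>) -> i32 {
    curr_max.map_or(1, |max| max + 1)
}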
diff --git a/store/postgres/src/dynds/shared.rs b/store/postgres/src/dynds/shared.rs index a151a779cbe..418c4583fa9 100644 --- a/store/postgres/src/dynds/shared.rs +++ b/store/postgres/src/dynds/shared.rs @@ -12,6 +12,7 @@ use diesel::{insert_into, pg::PgConnection}; use graph::{ components::store::StoredDynamicDataSource, constraint_violation, + data_source::CausalityRegion, prelude::{ bigdecimal::ToPrimitive, serde_json, BigDecimal, BlockNumber, BlockPtr, DeploymentHash, StoreError, @@ -84,7 +85,8 @@ pub(super) fn load( // The shared schema is only used for legacy deployments, and therefore not used for // subgraphs that use file data sources. - is_offchain: false, + done_at: None, + causality_region: CausalityRegion::ONCHAIN, }; if data_sources.last().and_then(|d| d.creation_block) > data_source.creation_block { @@ -120,10 +122,11 @@ pub(super) fn insert( param, context, creation_block: _, - is_offchain, + done_at: _, + causality_region, } = ds; - if *is_offchain { + if causality_region != &CausalityRegion::ONCHAIN { return Err(constraint_violation!( "using shared data source schema with file data sources" )); diff --git a/store/postgres/src/lib.rs b/store/postgres/src/lib.rs index 32c20e8b817..57fa58de172 100644 --- a/store/postgres/src/lib.rs +++ b/store/postgres/src/lib.rs @@ -69,7 +69,7 @@ pub use self::subgraph_store::{unused, DeploymentPlacer, Shard, SubgraphStore, P pub mod command_support { pub mod catalog { pub use crate::block_store::primary as block_store; - pub use crate::catalog::account_like; + pub use crate::catalog::{account_like, stats}; pub use crate::copy::{copy_state, copy_table_state}; pub use crate::primary::Connection; pub use crate::primary::{ @@ -77,6 +77,9 @@ pub mod command_support { subgraph_version, Site, }; } + pub mod index { + pub use crate::relational::index::{CreateIndex, Method}; + } pub use crate::primary::Namespace; pub use crate::relational::{Catalog, Column, ColumnType, Layout, SqlName}; } diff --git a/store/postgres/src/primary.rs b/store/postgres/src/primary.rs index 90717784a2e..c4a01086667 100644 --- a/store/postgres/src/primary.rs +++ b/store/postgres/src/primary.rs @@ -239,6 +239,10 @@ impl Namespace { Ok(Namespace(s)) } + pub fn prune(id: DeploymentId) -> Self { + Namespace(format!("prune{id}")) + } + pub fn as_str(&self) -> &str { &self.0 } @@ -1491,6 +1495,18 @@ impl<'a> Connection<'a> { .map_err(|e| anyhow!("error looking up ens_name for hash {}: {}", hash, e).into()) } + pub fn is_ens_table_empty(&self) -> Result<bool, StoreError> { + use ens_names as dsl; + + dsl::table + .select(dsl::name) + .limit(1) + .get_result::<String>(self.conn.as_ref()) + .optional() + .map(|r| r.is_none()) + .map_err(|e| anyhow!("error checking if ens table is empty: {}", e).into()) + } + pub fn record_active_copy(&self, src: &Site, dst: &Site) -> Result<(), StoreError> { use active_copies as cp; @@ -1515,6 +1531,17 @@ impl<'a> Connection<'a> { } } +/// Return `true` if we deem this installation to be empty, defined as +/// having no deployments and no subgraph names in the database +pub fn is_empty(conn: &PgConnection) -> Result<bool, StoreError> { + use deployment_schemas as ds; + use subgraph as s; + + let empty = ds::table.count().get_result::<i64>(conn)? == 0 + && s::table.count().get_result::<i64>(conn)? == 0; + Ok(empty) +} + /// A struct that reads from pools in order, trying each pool in turn until /// a query returns either success or anything but a /// `Err(StoreError::DatabaseUnavailable)`.
This only works for tables that @@ -1583,19 +1610,25 @@ impl Mirror { "subgraph_version", ]; + fn run_query(conn: &PgConnection, query: String) -> Result<(), StoreError> { + conn.batch_execute(&query).map_err(StoreError::from) + } + fn copy_table( conn: &PgConnection, src_nsp: &str, dst_nsp: &str, table_name: &str, ) -> Result<(), StoreError> { - let query = format!( - "insert into {dst_nsp}.{table_name} select * from {src_nsp}.{table_name};", - src_nsp = src_nsp, - dst_nsp = dst_nsp, - table_name = table_name - ); - conn.batch_execute(&query).map_err(StoreError::from) + run_query( + conn, + format!( + "insert into {dst_nsp}.{table_name} select * from {src_nsp}.{table_name};", + src_nsp = src_nsp, + dst_nsp = dst_nsp, + table_name = table_name + ), + ) } let check_cancel = || { @@ -1622,6 +1655,7 @@ impl Mirror { conn.batch_execute(&query)?; check_cancel()?; + // Repopulate `PUBLIC_TABLES` by copying their data wholesale for table_name in PUBLIC_TABLES { copy_table( conn, @@ -1631,15 +1665,30 @@ impl Mirror { )?; check_cancel()?; } - for table_name in SUBGRAPHS_TABLES { - copy_table( - conn, - &ForeignServer::metadata_schema(&*PRIMARY_SHARD), - NAMESPACE_SUBGRAPHS, - table_name, - )?; - check_cancel()?; - } + + // Repopulate `SUBGRAPHS_TABLES` but only copy the data we actually + // need to respond to queries when the primary is down + let src_nsp = ForeignServer::metadata_schema(&*PRIMARY_SHARD); + let dst_nsp = NAMESPACE_SUBGRAPHS; + + run_query( + conn, + format!( + "insert into {dst_nsp}.subgraph \ + select * from {src_nsp}.subgraph + where current_version is not null;" + ), + )?; + run_query( + conn, + format!( + "insert into {dst_nsp}.subgraph_version \ + select v.* from {src_nsp}.subgraph_version v, {src_nsp}.subgraph s + where v.id = s.current_version;" + ), + )?; + copy_table(conn, &src_nsp, dst_nsp, "subgraph_deployment_assignment")?; + Ok(()) } diff --git a/store/postgres/src/relational.rs b/store/postgres/src/relational.rs index a71639a9e42..7c88437918c 100644 --- a/store/postgres/src/relational.rs +++ b/store/postgres/src/relational.rs @@ -14,6 +14,9 @@ mod ddl_tests; #[cfg(test)] mod query_tests; +pub(crate) mod index; +mod prune; + use diesel::{connection::SimpleConnection, Connection}; use diesel::{debug_query, OptionalExtension, PgConnection, RunQueryDsl}; use graph::cheap_clone::CheapClone; @@ -21,11 +24,12 @@ use graph::constraint_violation; use graph::data::graphql::TypeExt as _; use graph::data::query::Trace; use graph::data::value::Word; -use graph::prelude::{q, s, StopwatchMetrics, ENV_VARS}; +use graph::data_source::CausalityRegion; +use graph::prelude::{q, s, EntityQuery, StopwatchMetrics, ENV_VARS}; use graph::slog::warn; use inflector::Inflector; use lazy_static::lazy_static; -use std::borrow::Cow; +use std::borrow::{Borrow, Cow}; use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet}; use std::convert::{From, TryFrom}; use std::fmt::{self, Write}; @@ -47,9 +51,8 @@ use graph::data::schema::{FulltextConfig, FulltextDefinition, Schema, SCHEMA_TYP use graph::data::store::BYTES_SCALAR; use graph::data::subgraph::schema::{POI_OBJECT, POI_TABLE}; use graph::prelude::{ - anyhow, info, BlockNumber, DeploymentHash, Entity, EntityChange, EntityCollection, - EntityFilter, EntityOperation, EntityOrder, EntityRange, Logger, QueryExecutionError, - StoreError, StoreEvent, ValueType, BLOCK_NUMBER_MAX, + anyhow, info, BlockNumber, DeploymentHash, Entity, EntityChange, EntityOperation, Logger, + QueryExecutionError, StoreError, StoreEvent, ValueType, BLOCK_NUMBER_MAX, }; 
use crate::block_range::{BLOCK_COLUMN, BLOCK_RANGE_COLUMN}; @@ -57,7 +60,6 @@ pub use crate::catalog::Catalog; use crate::connection_pool::ForeignServer; use crate::{catalog, deployment}; -const POSTGRES_MAX_PARAMETERS: usize = u16::MAX as usize; // 65535 const DELETE_OPERATION_CHUNK_SIZE: usize = 1_000; /// The size of string prefixes that we index. This is chosen so that we @@ -88,7 +90,7 @@ lazy_static! { /// Postgres, we would create the same table twice. We consider this case /// to be pathological and so unlikely in practice that we do not try to work /// around it in the application. -#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Hash)] +#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Hash, Ord)] pub struct SqlName(String); impl SqlName { @@ -160,6 +162,12 @@ impl fmt::Display for SqlName { } } +impl Borrow<str> for &SqlName { + fn borrow(&self) -> &str { + self.as_str() + } +} + /// The SQL type to use for GraphQL ID properties. We support /// strings and byte arrays #[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)] @@ -306,6 +314,9 @@ impl Layout { &enums, &id_types, i as u32, + catalog + .entities_with_causality_region + .contains(&EntityType::from(obj_type.clone())), ) }) .collect::, _>>()?; @@ -386,6 +397,7 @@ impl Layout { position: position as u32, is_account_like: false, immutable: false, + has_causality_region: false, } } @@ -397,8 +409,9 @@ impl Layout { conn: &PgConnection, site: Arc<Site>, schema: &Schema, + entities_with_causality_region: BTreeSet<EntityType>, ) -> Result { - let catalog = Catalog::for_creation(site.cheap_clone()); + let catalog = Catalog::for_creation(site.cheap_clone(), entities_with_causality_region); let layout = Self::new(site, schema, catalog)?; let sql = layout .as_ddl() @@ -477,31 +490,31 @@ impl Layout { pub fn find( &self, conn: &PgConnection, - entity: &EntityType, - id: &str, + key: &EntityKey, block: BlockNumber, ) -> Result<Option<Entity>, StoreError> { - let table = self.table_for_entity(entity)?; - FindQuery::new(table.as_ref(), id, block) + let table = self.table_for_entity(&key.entity_type)?; + FindQuery::new(table.as_ref(), key, block) .get_result::(conn) .optional()? .map(|entity_data| entity_data.deserialize_with_layout(self, None, true)) .transpose() } + // An optimization for looking up multiple entities: this generates a single SQL query using `UNION ALL`. pub fn find_many( &self, conn: &PgConnection, - ids_for_type: &BTreeMap<&EntityType, Vec<&str>>, + ids_for_type: &BTreeMap<(EntityType, CausalityRegion), Vec>, block: BlockNumber, - ) -> Result<BTreeMap<EntityType, Vec<Entity>>, StoreError> { + ) -> Result<BTreeMap<EntityKey, Entity>, StoreError> { if ids_for_type.is_empty() { return Ok(BTreeMap::new()); } let mut tables = Vec::new(); - for entity_type in ids_for_type.keys() { - tables.push(self.table_for_entity(entity_type)?.as_ref()); + for (entity_type, cr) in ids_for_type.keys() { + tables.push((self.table_for_entity(entity_type)?.as_ref(), *cr)); } let query = FindManyQuery { _namespace: &self.catalog.site.namespace, @@ -509,17 +522,22 @@ impl Layout { tables, block, }; - let mut entities_for_type: BTreeMap<EntityType, Vec<Entity>> = BTreeMap::new(); + let mut entities: BTreeMap<EntityKey, Entity> = BTreeMap::new(); for data in query.load::(conn)?
{ let entity_type = data.entity_type(); let entity_data: Entity = data.deserialize_with_layout(self, None, true)?; - entities_for_type - .entry(entity_type) - .or_default() - .push(entity_data); + let key = EntityKey { + entity_type, + entity_id: entity_data.id()?.into(), + causality_region: CausalityRegion::from_entity(&entity_data), + }; + let overwrite = entities.insert(key, entity_data).is_some(); + if overwrite { + return Err(constraint_violation!("duplicate entity in result set")); + } } - Ok(entities_for_type) + Ok(entities) } pub fn find_changes( @@ -546,18 +564,15 @@ impl Layout { for entity_data in inserts_or_updates.into_iter() { let entity_type = entity_data.entity_type(); - let mut data: Entity = entity_data.deserialize_with_layout(self, None, false)?; + let data: Entity = entity_data.deserialize_with_layout(self, None, true)?; let entity_id = Word::from(data.id().expect("Invalid ID for entity.")); processed_entities.insert((entity_type.clone(), entity_id.clone())); - // `__typename` is not a real field. - data.remove("__typename") - .expect("__typename expected; this is a bug"); - changes.push(EntityOperation::Set { key: EntityKey { entity_type, entity_id, + causality_region: CausalityRegion::from_entity(&data), }, data, }); @@ -574,6 +589,7 @@ impl Layout { key: EntityKey { entity_type, entity_id, + causality_region: del.causality_region(), }, }); } @@ -593,11 +609,10 @@ impl Layout { let table = self.table_for_entity(entity_type)?; let _section = stopwatch.start_section("insert_modification_insert_query"); let mut count = 0; - // Each operation must respect the maximum number of bindings allowed in PostgreSQL queries, - // so we need to act in chunks whose size is defined by the number of entities times the - // number of attributes each entity type has. - // We add 1 to account for the `block_range` bind parameter - let chunk_size = POSTGRES_MAX_PARAMETERS / (table.columns.len() + 1); + + // We insert the entities in chunks to make sure each operation does + // not exceed the maximum number of bindings allowed in queries + let chunk_size = InsertQuery::chunk_size(table); for chunk in entities.chunks_mut(chunk_size) { count += InsertQuery::new(table, chunk, block)? 
.get_results(conn) @@ -623,54 +638,62 @@ impl Layout { &self, logger: &Logger, conn: &PgConnection, - collection: EntityCollection, - filter: Option, - order: EntityOrder, - range: EntityRange, - block: BlockNumber, - query_id: Option, + query: EntityQuery, ) -> Result<(Vec, Trace), QueryExecutionError> { fn log_query_timing( logger: &Logger, query: &FilterQuery, elapsed: Duration, entity_count: usize, + trace: bool, ) -> Trace { // 20kB const MAXLEN: usize = 20_480; - if !ENV_VARS.log_sql_timing() { + if !ENV_VARS.log_sql_timing() && !trace { return Trace::None; } let mut text = debug_query(&query).to_string().replace("\n", "\t"); - let trace = Trace::query(&text, elapsed, entity_count); - - // If the query + bind variables is more than MAXLEN, truncate it; - // this will happen when queries have very large bind variables - // (e.g., long arrays of string ids) - if text.len() > MAXLEN { - text.truncate(MAXLEN); - text.push_str(" ..."); + + let trace = if trace { + Trace::query(&text, elapsed, entity_count) + } else { + Trace::None + }; + + if ENV_VARS.log_sql_timing() { + // If the query + bind variables is more than MAXLEN, truncate it; + // this will happen when queries have very large bind variables + // (e.g., long arrays of string ids) + if text.len() > MAXLEN { + text.truncate(MAXLEN); + text.push_str(" ..."); + } + info!( + logger, + "Query timing (SQL)"; + "query" => text, + "time_ms" => elapsed.as_millis(), + "entity_count" => entity_count + ); } - info!( - logger, - "Query timing (SQL)"; - "query" => text, - "time_ms" => elapsed.as_millis(), - "entity_count" => entity_count - ); trace } - let filter_collection = FilterCollection::new(self, collection, filter.as_ref(), block)?; + let trace = query.trace; + + let filter_collection = + FilterCollection::new(self, query.collection, query.filter.as_ref(), query.block)?; let query = FilterQuery::new( &filter_collection, - filter.as_ref(), - order, - range, - block, - query_id, + &self, + query.filter.as_ref(), + query.order, + query.range, + query.block, + query.query_id, + &self.site, )?; let query_clone = query.clone(); @@ -708,7 +731,7 @@ impl Layout { )), } })?; - let trace = log_query_timing(logger, &query_clone, start.elapsed(), values.len()); + let trace = log_query_timing(logger, &query_clone, start.elapsed(), values.len(), trace); let parent_type = filter_collection.parent_type()?.map(ColumnType::from); values @@ -756,11 +779,9 @@ impl Layout { let _section = stopwatch.start_section("update_modification_insert_query"); let mut count = 0; - // Each operation must respect the maximum number of bindings allowed in PostgreSQL queries, - // so we need to act in chunks whose size is defined by the number of entities times the - // number of attributes each entity type has. - // We add 1 to account for the `block_range` bind parameter - let chunk_size = POSTGRES_MAX_PARAMETERS / (table.columns.len() + 1); + // We insert the entities in chunks to make sure each operation does + // not exceed the maximum number of bindings allowed in queries + let chunk_size = InsertQuery::chunk_size(table); for chunk in entities.chunks_mut(chunk_size) { count += InsertQuery::new(table, chunk, block)?.execute(conn)?; } @@ -1204,6 +1225,10 @@ pub struct Table { /// Entities in this table are immutable, i.e., will never be updated or /// deleted pub(crate) immutable: bool, + + /// Whether this table has an explicit `causality_region` column. 
If `false`, then the column is + /// not present and the causality region for all rows is implicitly `0` (equivalent to CausalityRegion::ONCHAIN). + pub(crate) has_causality_region: bool, } impl Table { @@ -1214,6 +1239,7 @@ impl Table { enums: &EnumMap, id_types: &IdTypeMap, position: u32, + has_causality_region: bool, ) -> Result { SqlName::check_valid_identifier(&*defn.name, "object")?; @@ -1239,10 +1265,28 @@ impl Table { columns, position, immutable, + has_causality_region, }; Ok(table) }
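The observable effect of the new flag shows up in `columns_ddl` in ddl.rs below: when `has_causality_region` is set, the generated `create table` gains a `causality_region int not null` column ahead of the data columns. A sketch of the resulting shape for a hypothetical mutable `file_thing` entity (illustrative, not captured output):

// Assumed namespace and entity name; compare `create_table` further down.
const FILE_THING_DDL_SKETCH: &str = "\
create table \"sgd0815\".\"file_thing\" (
    vid bigserial primary key,
    block_range int4range not null,
    causality_region int not null,
    \"id\" text not null
);";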
+ /// Create a table that is like `self` except that its name in the + /// database is based on `namespace` and `name` + pub fn new_like(&self, namespace: &Namespace, name: &SqlName) -> Arc<Table> { + let other = Table { + object: self.object.clone(), + name: name.clone(), + qualified_name: SqlName::qualified_name(namespace, &name), + columns: self.columns.clone(), + is_account_like: self.is_account_like, + position: self.position, + immutable: self.immutable, + has_causality_region: self.has_causality_region, + }; + + Arc::new(other) + } + /// Find the column `name` in this table. The name must be in snake case, /// i.e., use SQL conventions pub fn column(&self, name: &SqlName) -> Option<&Column> { @@ -1290,6 +1334,21 @@ impl Table { .find(|column| column.is_primary_key()) .expect("every table has a primary key") } + + pub(crate) fn analyze(&self, conn: &PgConnection) -> Result<(), StoreError> { + let table_name = &self.qualified_name; + let sql = format!("analyze {table_name}"); + conn.execute(&sql)?; + Ok(()) + } + + pub(crate) fn block_column(&self) -> &SqlName { + if self.immutable { + &*crate::block_range::BLOCK_COLUMN_SQL + } else { + &*crate::block_range::BLOCK_RANGE_COLUMN_SQL + } + } } /// Return the enclosed named type for a field type, i.e., the type after @@ -1337,7 +1396,8 @@ impl LayoutCache { fn load(conn: &PgConnection, site: Arc<Site>) -> Result<Arc<Layout>, StoreError> { let (subgraph_schema, use_bytea_prefix) = deployment::schema(conn, site.as_ref())?; - let catalog = Catalog::load(conn, site.clone(), use_bytea_prefix)?; + let has_causality_region = deployment::entities_with_causality_region(conn, site.id)?; + let catalog = Catalog::load(conn, site.clone(), use_bytea_prefix, has_causality_region)?; let layout = Arc::new(Layout::new(site.clone(), &subgraph_schema, catalog)?); layout.refresh(conn, site) } @@ -1419,6 +1479,14 @@ impl LayoutCache { } } + pub(crate) fn remove(&self, site: &Site) -> Option<Arc<Layout>> { + self.entries + .lock() + .unwrap() + .remove(&site.deployment) + .map(|CacheEntry { value, expires: _ }| value.clone()) + } + // Only needed for tests #[cfg(debug_assertions)] pub(crate) fn clear(&self) { diff --git a/store/postgres/src/relational/ddl.rs b/store/postgres/src/relational/ddl.rs index 22db3769435..5b20be43d29 100644 --- a/store/postgres/src/relational/ddl.rs +++ b/store/postgres/src/relational/ddl.rs @@ -1,7 +1,11 @@ -use std::fmt::{self, Write}; +use std::{ + fmt::{self, Write}, + iter, +}; use graph::prelude::BLOCK_NUMBER_MAX; +use crate::block_range::CAUSALITY_REGION_COLUMN; use crate::relational::{ ColumnType, BLOCK_COLUMN, BLOCK_RANGE_COLUMN, BYTE_ARRAY_PREFIX_SIZE, STRING_PREFIX_SIZE, VID_COLUMN, @@ -9,6 +13,13 @@ use crate::relational::{ use super::{Column, Layout, SqlName, Table}; +// In debug builds (for testing etc.) we unconditionally create exclusion constraints; in +// release builds for production we skip them +#[cfg(debug_assertions)] +const CREATE_EXCLUSION_CONSTRAINT: bool = true; +#[cfg(not(debug_assertions))] +const CREATE_EXCLUSION_CONSTRAINT: bool = false; + impl Layout { /// Generate the DDL for the entire layout, i.e., all `create table` /// and `create index` etc. statements needed in the database schema @@ -27,7 +38,7 @@ impl Layout { tables.sort_by_key(|table| table.position); // Output 'create table' statements for all tables for table in tables { - table.as_ddl(&mut out, self)?; + table.as_ddl(&mut out)?; } Ok(out) @@ -54,223 +65,239 @@ impl Layout { } impl Table { - /// Generate the DDL for one table, i.e.
one `create table` statement - /// and all `create index` statements for the table's columns + /// Return an iterator over all the column names of this table /// - /// See the unit tests at the end of this file for the actual DDL that - /// gets generated - fn as_ddl(&self, out: &mut String, layout: &Layout) -> fmt::Result { + // This needs to stay in sync with `create_table` + pub(crate) fn column_names(&self) -> impl Iterator { + let block_column = if self.immutable { + BLOCK_COLUMN + } else { + BLOCK_RANGE_COLUMN + }; + let data_cols = self.columns.iter().map(|col| col.name.as_str()); + iter::once(VID_COLUMN) + .chain(data_cols) + .chain(iter::once(block_column)) + } + + // Changes to this function require changing `column_names`, too + pub(crate) fn create_table(&self, out: &mut String) -> fmt::Result { fn columns_ddl(table: &Table) -> Result { let mut cols = String::new(); let mut first = true; + + if table.has_causality_region { + first = false; + write!( + cols, + "{causality_region} int not null", + causality_region = CAUSALITY_REGION_COLUMN + )?; + } + for column in &table.columns { if !first { writeln!(cols, ",")?; - } else { - writeln!(cols)?; + write!(cols, " ")?; } - write!(cols, " ")?; column.as_ddl(&mut cols)?; first = false; } + Ok(cols) } - fn create_table(table: &Table, out: &mut String, layout: &Layout) -> fmt::Result { - if table.immutable { - writeln!( - out, - r#" - create table {nsp}.{name} ( - {vid} bigserial primary key, - {block} int not null, - {cols}, - unique({id}) - ); - "#, - nsp = layout.catalog.site.namespace, - name = table.name.quoted(), - cols = columns_ddl(table)?, - vid = VID_COLUMN, - block = BLOCK_COLUMN, - id = table.primary_key().name - ) - } else { - writeln!( - out, - r#" - create table {nsp}.{name} ( - {vid} bigserial primary key, - {block_range} int4range not null, - {cols} - ); - "#, - nsp = layout.catalog.site.namespace, - name = table.name.quoted(), - cols = columns_ddl(table)?, - vid = VID_COLUMN, - block_range = BLOCK_RANGE_COLUMN - )?; + if self.immutable { + writeln!( + out, + " + create table {qname} ( + {vid} bigserial primary key, + {block} int not null,\n\ + {cols}, + unique({id}) + );", + qname = self.qualified_name, + cols = columns_ddl(self)?, + vid = VID_COLUMN, + block = BLOCK_COLUMN, + id = self.primary_key().name + ) + } else { + writeln!( + out, + r#" + create table {qname} ( + {vid} bigserial primary key, + {block_range} int4range not null, + {cols} + );"#, + qname = self.qualified_name, + cols = columns_ddl(self)?, + vid = VID_COLUMN, + block_range = BLOCK_RANGE_COLUMN + )?; - table.exclusion_ddl( - out, - layout.catalog.site.namespace.as_str(), - layout.catalog.create_exclusion_constraint(), - ) - } + self.exclusion_ddl(out) } + } - fn create_time_travel_indexes( - table: &Table, - out: &mut String, - layout: &Layout, - ) -> fmt::Result { - if table.immutable { - write!( - out, - "create index brin_{table_name}\n \ - on {schema_name}.{table_name}\n \ - using brin({block}, vid);\n", - table_name = table.name, - schema_name = layout.catalog.site.namespace, - block = BLOCK_COLUMN - ) - } else { - // Add a BRIN index on the block_range bounds to exploit the fact - // that block ranges closely correlate with where in a table an - // entity appears physically. This index is incredibly efficient for - // reverts where we look for very recent blocks, so that this index - // is highly selective. See https://github.com/graphprotocol/graph-node/issues/1415#issuecomment-630520713 - // for details on one experiment. 
- // - // We do not index the `block_range` as a whole, but rather the lower - // and upper bound separately, since experimentation has shown that - // Postgres will not use the index on `block_range` for clauses like - // `block_range @> $block` but rather falls back to a full table scan. - // - // We also make sure that we do not put `NULL` in the index for - // the upper bound since nulls can not be compared to anything and - // will make the index less effective. - // - // To make the index usable, queries need to have clauses using - // `lower(block_range)` and `coalesce(..)` verbatim. - // - // We also index `vid` as that correlates with the order in which - // entities are stored. - write!(out,"create index brin_{table_name}\n \ - on {schema_name}.{table_name}\n \ - using brin(lower(block_range), coalesce(upper(block_range), {block_max}), vid);\n", - table_name = table.name, - schema_name = layout.catalog.site.namespace, - block_max = BLOCK_NUMBER_MAX)?; + fn create_time_travel_indexes(&self, out: &mut String) -> fmt::Result { + if self.immutable { + write!( + out, + "create index brin_{table_name}\n \ + on {qname}\n \ + using brin({block}, vid);\n", + table_name = self.name, + qname = self.qualified_name, + block = BLOCK_COLUMN + ) + } else { + // Add a BRIN index on the block_range bounds to exploit the fact + // that block ranges closely correlate with where in a table an + // entity appears physically. This index is incredibly efficient for + // reverts where we look for very recent blocks, so that this index + // is highly selective. See https://github.com/graphprotocol/graph-node/issues/1415#issuecomment-630520713 + // for details on one experiment. + // + // We do not index the `block_range` as a whole, but rather the lower + // and upper bound separately, since experimentation has shown that + // Postgres will not use the index on `block_range` for clauses like + // `block_range @> $block` but rather falls back to a full table scan. + // + // We also make sure that we do not put `NULL` in the index for + // the upper bound since nulls can not be compared to anything and + // will make the index less effective. + // + // To make the index usable, queries need to have clauses using + // `lower(block_range)` and `coalesce(..)` verbatim. + // + // We also index `vid` as that correlates with the order in which + // entities are stored. 
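For a concrete instance of what this arm emits, see `THING_DDL` in the test expectations near the end of this diff; for the mutable `thing` table the generated statement is (quoted from there, with 2147483647 being `BLOCK_NUMBER_MAX`):

create index brin_thing
    on "sgd0815"."thing"
  using brin(lower(block_range), coalesce(upper(block_range), 2147483647), vid);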
+ write!(out,"create index brin_{table_name}\n \ + on {qname}\n \ + using brin(lower(block_range), coalesce(upper(block_range), {block_max}), vid);\n", + table_name = self.name, + qname = self.qualified_name, + block_max = BLOCK_NUMBER_MAX)?; - // Add a BTree index that helps with the `RevertClampQuery` by making - // it faster to find entity versions that have been modified - write!( - out, - "create index {table_name}_block_range_closed\n \ - on {schema_name}.{table_name}(coalesce(upper(block_range), {block_max}))\n \ - where coalesce(upper(block_range), {block_max}) < {block_max};\n", - table_name = table.name, - schema_name = layout.catalog.site.namespace, - block_max = BLOCK_NUMBER_MAX - ) - } + // Add a BTree index that helps with the `RevertClampQuery` by making + // it faster to find entity versions that have been modified + write!( + out, + "create index {table_name}_block_range_closed\n \ + on {qname}(coalesce(upper(block_range), {block_max}))\n \ + where coalesce(upper(block_range), {block_max}) < {block_max};\n", + table_name = self.name, + qname = self.qualified_name, + block_max = BLOCK_NUMBER_MAX + ) } + } - fn create_attribute_indexes( - table: &Table, - out: &mut String, - layout: &Layout, - ) -> fmt::Result { - // Create indexes. Skip columns whose type is an array of enum, - // since there is no good way to index them with Postgres 9.6. - // Once we move to Postgres 11, we can enable that - // (tracked in graph-node issue #1330) - for (i, column) in table - .columns - .iter() - .filter(|col| !(col.is_list() && col.is_enum())) - .enumerate() - { - if table.immutable && column.is_primary_key() { - // We create a unique index on `id` in `create_table` - // and don't need an explicit attribute index - continue; - } + fn create_attribute_indexes(&self, out: &mut String) -> fmt::Result { + // Create indexes. Skip columns whose type is an array of enum, + // since there is no good way to index them with Postgres 9.6. + // Once we move to Postgres 11, we can enable that + // (tracked in graph-node issue #1330) + for (i, column) in self + .columns + .iter() + .filter(|col| !(col.is_list() && col.is_enum())) + .enumerate() + { + if self.immutable && column.is_primary_key() { + // We create a unique index on `id` in `create_table` + // and don't need an explicit attribute index + continue; + } - let (method, index_expr) = if column.is_reference() && !column.is_list() { - // For foreign keys, index the key together with the block range - // since we almost always also have a block_range clause in - // queries that look for specific foreign keys - if table.immutable { - let index_expr = format!("{}, {}", column.name.quoted(), BLOCK_COLUMN); - ("btree", index_expr) - } else { - let index_expr = - format!("{}, {}", column.name.quoted(), BLOCK_RANGE_COLUMN); - ("gist", index_expr) - } + let (method, index_expr) = if column.is_reference() && !column.is_list() { + // For foreign keys, index the key together with the block range + // since we almost always also have a block_range clause in + // queries that look for specific foreign keys + if self.immutable { + let index_expr = format!("{}, {}", column.name.quoted(), BLOCK_COLUMN); + ("btree", index_expr) } else { - // Attributes that are plain strings or bytes are - // indexed with a BTree; but they can be too large for - // Postgres' limit on values that can go into a BTree. 
- // For those attributes, only index the first - // STRING_PREFIX_SIZE or BYTE_ARRAY_PREFIX_SIZE characters - // see: attr-bytea-prefix - let index_expr = if column.use_prefix_comparison { - match column.column_type { - ColumnType::String => { - format!("left({}, {})", column.name.quoted(), STRING_PREFIX_SIZE) - } - ColumnType::Bytes => format!( - "substring({}, 1, {})", - column.name.quoted(), - BYTE_ARRAY_PREFIX_SIZE - ), - _ => unreachable!("only String and Bytes can have arbitrary size"), + let index_expr = format!("{}, {}", column.name.quoted(), BLOCK_RANGE_COLUMN); + ("gist", index_expr) + } + } else { + // Attributes that are plain strings or bytes are + // indexed with a BTree; but they can be too large for + // Postgres' limit on values that can go into a BTree. + // For those attributes, only index the first + // STRING_PREFIX_SIZE or BYTE_ARRAY_PREFIX_SIZE characters + // see: attr-bytea-prefix + let index_expr = if column.use_prefix_comparison { + match column.column_type { + ColumnType::String => { + format!("left({}, {})", column.name.quoted(), STRING_PREFIX_SIZE) } - } else { - column.name.quoted() - }; - - let method = if column.is_list() || column.is_fulltext() { - "gin" - } else { - "btree" - }; + ColumnType::Bytes => format!( + "substring({}, 1, {})", + column.name.quoted(), + BYTE_ARRAY_PREFIX_SIZE + ), + _ => unreachable!("only String and Bytes can have arbitrary size"), + } + } else { + column.name.quoted() + }; - (method, index_expr) + let method = if column.is_list() || column.is_fulltext() { + "gin" + } else { + "btree" }; - write!( - out, - "create index attr_{table_index}_{column_index}_{table_name}_{column_name}\n on {schema_name}.\"{table_name}\" using {method}({index_expr});\n", - table_index = table.position, - table_name = table.name, - column_index = i, - column_name = column.name, - schema_name = layout.catalog.site.namespace, - method = method, - index_expr = index_expr, - )?; - } - writeln!(out) + + (method, index_expr) + }; + write!( + out, + "create index attr_{table_index}_{column_index}_{table_name}_{column_name}\n on {qname} using {method}({index_expr});\n", + table_index = self.position, + table_name = self.name, + column_index = i, + column_name = column.name, + qname = self.qualified_name, + method = method, + index_expr = index_expr, + )?; } + writeln!(out) + } + + /// Generate the DDL for one table, i.e. one `create table` statement + /// and all `create index` statements for the table's columns + /// + /// See the unit tests at the end of this file for the actual DDL that + /// gets generated + pub(crate) fn as_ddl(&self, out: &mut String) -> fmt::Result { + self.create_table(out)?; + self.create_time_travel_indexes(out)?; + self.create_attribute_indexes(out) + } + + pub fn exclusion_ddl(&self, out: &mut String) -> fmt::Result { + // Tables with causality regions need to use exclusion constraints for correctness, + // to catch violations of write isolation. + let as_constraint = self.has_causality_region || CREATE_EXCLUSION_CONSTRAINT; - create_table(self, out, layout)?; - create_time_travel_indexes(self, out, layout)?; - create_attribute_indexes(self, out, layout) + self.exclusion_ddl_inner(out, as_constraint) } - pub fn exclusion_ddl(&self, out: &mut String, nsp: &str, as_constraint: bool) -> fmt::Result { + // `pub` for tests. 
+ pub(crate) fn exclusion_ddl_inner(&self, out: &mut String, as_constraint: bool) -> fmt::Result { if as_constraint { writeln!( out, - r#" - alter table {nsp}.{name} - add constraint {bare_name}_{id}_{block_range}_excl exclude using gist ({id} with =, {block_range} with &&); - "#, - name = self.name.quoted(), + " + alter table {qname} + add constraint {bare_name}_{id}_{block_range}_excl exclude using gist ({id} with =, {block_range} with &&);", + qname = self.qualified_name, bare_name = self.name, id = self.primary_key().name, block_range = BLOCK_RANGE_COLUMN @@ -278,11 +305,11 @@ impl Table { } else { writeln!( out, - r#" - create index {bare_name}_{id}_{block_range}_excl on {nsp}.{name} + " + create index {bare_name}_{id}_{block_range}_excl on {qname} using gist ({id}, {block_range}); - "#, - name = self.name.quoted(), + ", + qname = self.qualified_name, bare_name = self.name, id = self.primary_key().name, block_range = BLOCK_RANGE_COLUMN @@ -299,7 +326,6 @@ impl Column { /// See the unit tests at the end of this file for the actual DDL that /// gets generated fn as_ddl(&self, out: &mut String) -> fmt::Result { - write!(out, " ")?; write!(out, "{:20} {}", self.name.quoted(), self.sql_type())?; if self.is_list() { write!(out, "[]")?; diff --git a/store/postgres/src/relational/ddl_tests.rs b/store/postgres/src/relational/ddl_tests.rs index fb88e9cffce..72a3b8bed7b 100644 --- a/store/postgres/src/relational/ddl_tests.rs +++ b/store/postgres/src/relational/ddl_tests.rs @@ -1,4 +1,5 @@ use itertools::Itertools; +use pretty_assertions::assert_eq; use super::*; @@ -11,7 +12,8 @@ fn test_layout(gql: &str) -> Layout { let schema = Schema::parse(gql, subgraph.clone()).expect("Test schema invalid"); let namespace = Namespace::new("sgd0815".to_owned()).unwrap(); let site = Arc::new(make_dummy_site(subgraph, namespace, "anet".to_string())); - let catalog = Catalog::for_tests(site.clone()).expect("Can not create catalog"); + let catalog = Catalog::for_tests(site.clone(), BTreeSet::from_iter(["FileThing".into()])) + .expect("Can not create catalog"); Layout::new(site, &schema, catalog).expect("Failed to construct Layout") } @@ -41,23 +43,23 @@ fn table_is_sane() { assert!(table.column(&bad_sql_name).is_none()); } -#[test] -fn generate_ddl() { - // Check that the two strings are the same after replacing runs of - // whitespace with a single space - #[track_caller] - fn check_eqv(left: &str, right: &str) { - let left_s = left.split_whitespace().join(" "); - let right_s = right.split_whitespace().join(" "); - if left_s != right_s { - // Make sure the original strings show up in the error message - assert_eq!(left, right); - } +// Check that the two strings are the same after replacing runs of +// whitespace with a single space +#[track_caller] +fn check_eqv(left: &str, right: &str) { + let left_s = left.split_whitespace().join(" "); + let right_s = right.split_whitespace().join(" "); + if left_s != right_s { + // Make sure the original strings show up in the error message + assert_eq!(left, right); } +} +#[test] +fn generate_ddl() { let layout = test_layout(THING_GQL); let sql = layout.as_ddl().expect("Failed to generate DDL"); - check_eqv(THING_DDL, &sql); + assert_eq!(THING_DDL, &sql); // Use `assert_eq!` to also test the formatting. 
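+    // `check_eqv` (defined above) collapses runs of whitespace before
+    // comparing, e.g. check_eqv("create table  t (\n)", "create table t ( )")
+    // passes, while the `assert_eq!` on THING_DDL also pins the exact layout.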
let layout = test_layout(MUSIC_GQL); let sql = layout.as_ddl().expect("Failed to generate DDL"); @@ -86,16 +88,22 @@ fn exlusion_ddl() { // When `as_constraint` is false, just create an index let mut out = String::new(); table - .exclusion_ddl(&mut out, "sgd0815", false) + .exclusion_ddl_inner(&mut out, false) .expect("can write exclusion DDL"); - assert_eq!("create index thing_id_block_range_excl on sgd0815.\"thing\"\n using gist (id, block_range);", out.trim()); + check_eqv( + r#"create index thing_id_block_range_excl on "sgd0815"."thing" using gist (id, block_range);"#, + out.trim(), + ); // When `as_constraint` is true, add an exclusion constraint let mut out = String::new(); table - .exclusion_ddl(&mut out, "sgd0815", true) + .exclusion_ddl_inner(&mut out, true) .expect("can write exclusion DDL"); - assert_eq!("alter table sgd0815.\"thing\"\n add constraint thing_id_block_range_excl exclude using gist (id with =, block_range with &&);", out.trim()); + check_eqv( + r#"alter table "sgd0815"."thing" add constraint thing_id_block_range_excl exclude using gist (id with =, block_range with &&);"#, + out.trim(), + ); } #[test] @@ -173,7 +181,7 @@ fn can_copy_from() { ); } -const THING_GQL: &str = " +const THING_GQL: &str = r#" type Thing @entity { id: ID! bigThing: Thing! @@ -192,82 +200,110 @@ const THING_GQL: &str = " bytes: Bytes, bigInt: BigInt, color: Color, - }"; + } + + type FileThing @entity { + id: ID! + } + "#; -const THING_DDL: &str = "create type sgd0815.\"color\" +const THING_DDL: &str = r#"create type sgd0815."color" as enum ('BLUE', 'red', 'yellow'); -create type sgd0815.\"size\" - as enum (\'large\', \'medium\', \'small\'); -create table sgd0815.\"thing\" ( +create type sgd0815."size" + as enum ('large', 'medium', 'small'); + + create table "sgd0815"."thing" ( vid bigserial primary key, block_range int4range not null, - \"id\" text not null, - \"big_thing\" text not null -); -alter table sgd0815.\"thing\" - add constraint thing_id_block_range_excl exclude using gist (id with =, block_range with &&); + "id" text not null, + "big_thing" text not null + ); + + alter table "sgd0815"."thing" + add constraint thing_id_block_range_excl exclude using gist (id with =, block_range with &&); create index brin_thing - on sgd0815.thing + on "sgd0815"."thing" using brin(lower(block_range), coalesce(upper(block_range), 2147483647), vid); create index thing_block_range_closed - on sgd0815.thing(coalesce(upper(block_range), 2147483647)) + on "sgd0815"."thing"(coalesce(upper(block_range), 2147483647)) where coalesce(upper(block_range), 2147483647) < 2147483647; create index attr_0_0_thing_id - on sgd0815.\"thing\" using btree(\"id\"); + on "sgd0815"."thing" using btree("id"); create index attr_0_1_thing_big_thing - on sgd0815.\"thing\" using gist(\"big_thing\", block_range); + on "sgd0815"."thing" using gist("big_thing", block_range); -create table sgd0815.\"scalar\" ( + + create table "sgd0815"."scalar" ( vid bigserial primary key, block_range int4range not null, - \"id\" text not null, - \"bool\" boolean, - \"int\" integer, - \"big_decimal\" numeric, - \"string\" text, - \"bytes\" bytea, - \"big_int\" numeric, - \"color\" \"sgd0815\".\"color\" -); -alter table sgd0815.\"scalar\" - add constraint scalar_id_block_range_excl exclude using gist (id with =, block_range with &&); + "id" text not null, + "bool" boolean, + "int" integer, + "big_decimal" numeric, + "string" text, + "bytes" bytea, + "big_int" numeric, + "color" "sgd0815"."color" + ); + + alter table "sgd0815"."scalar" + add constraint 
scalar_id_block_range_excl exclude using gist (id with =, block_range with &&); create index brin_scalar - on sgd0815.scalar + on "sgd0815"."scalar" using brin(lower(block_range), coalesce(upper(block_range), 2147483647), vid); create index scalar_block_range_closed - on sgd0815.scalar(coalesce(upper(block_range), 2147483647)) + on "sgd0815"."scalar"(coalesce(upper(block_range), 2147483647)) where coalesce(upper(block_range), 2147483647) < 2147483647; create index attr_1_0_scalar_id - on sgd0815.\"scalar\" using btree(\"id\"); + on "sgd0815"."scalar" using btree("id"); create index attr_1_1_scalar_bool - on sgd0815.\"scalar\" using btree(\"bool\"); + on "sgd0815"."scalar" using btree("bool"); create index attr_1_2_scalar_int - on sgd0815.\"scalar\" using btree(\"int\"); + on "sgd0815"."scalar" using btree("int"); create index attr_1_3_scalar_big_decimal - on sgd0815.\"scalar\" using btree(\"big_decimal\"); + on "sgd0815"."scalar" using btree("big_decimal"); create index attr_1_4_scalar_string - on sgd0815.\"scalar\" using btree(left(\"string\", 256)); + on "sgd0815"."scalar" using btree(left("string", 256)); create index attr_1_5_scalar_bytes - on sgd0815.\"scalar\" using btree(substring(\"bytes\", 1, 64)); + on "sgd0815"."scalar" using btree(substring("bytes", 1, 64)); create index attr_1_6_scalar_big_int - on sgd0815.\"scalar\" using btree(\"big_int\"); + on "sgd0815"."scalar" using btree("big_int"); create index attr_1_7_scalar_color - on sgd0815.\"scalar\" using btree(\"color\"); + on "sgd0815"."scalar" using btree("color"); + + + create table "sgd0815"."file_thing" ( + vid bigserial primary key, + block_range int4range not null, + causality_region int not null, + "id" text not null + ); + + alter table "sgd0815"."file_thing" + add constraint file_thing_id_block_range_excl exclude using gist (id with =, block_range with &&); +create index brin_file_thing + on "sgd0815"."file_thing" + using brin(lower(block_range), coalesce(upper(block_range), 2147483647), vid); +create index file_thing_block_range_closed + on "sgd0815"."file_thing"(coalesce(upper(block_range), 2147483647)) + where coalesce(upper(block_range), 2147483647) < 2147483647; +create index attr_2_0_file_thing_id + on "sgd0815"."file_thing" using btree("id"); -"; +"#; -const MUSIC_GQL: &str = "type Musician @entity { +const MUSIC_GQL: &str = r#"type Musician @entity { id: ID! name: String! mainBand: Band bands: [Band!]! - writtenSongs: [Song]! @derivedFrom(field: \"writtenBy\") + writtenSongs: [Song]! @derivedFrom(field: "writtenBy") } type Band @entity { id: ID! name: String! - members: [Musician!]! @derivedFrom(field: \"bands\") + members: [Musician!]! @derivedFrom(field: "bands") originalSongs: [Song!]! } @@ -275,100 +311,100 @@ type Song @entity(immutable: true) { id: ID! title: String! writtenBy: Musician! - band: Band @derivedFrom(field: \"originalSongs\") + band: Band @derivedFrom(field: "originalSongs") } type SongStat @entity { id: ID! - song: Song @derivedFrom(field: \"id\") + song: Song @derivedFrom(field: "id") played: Int! 
-}"; -const MUSIC_DDL: &str = "create table sgd0815.\"musician\" ( +}"#; +const MUSIC_DDL: &str = r#"create table "sgd0815"."musician" ( vid bigserial primary key, block_range int4range not null, - \"id\" text not null, - \"name\" text not null, - \"main_band\" text, - \"bands\" text[] not null + "id" text not null, + "name" text not null, + "main_band" text, + "bands" text[] not null ); -alter table sgd0815.\"musician\" +alter table "sgd0815"."musician" add constraint musician_id_block_range_excl exclude using gist (id with =, block_range with &&); create index brin_musician - on sgd0815.musician + on "sgd0815"."musician" using brin(lower(block_range), coalesce(upper(block_range), 2147483647), vid); create index musician_block_range_closed - on sgd0815.musician(coalesce(upper(block_range), 2147483647)) + on "sgd0815"."musician"(coalesce(upper(block_range), 2147483647)) where coalesce(upper(block_range), 2147483647) < 2147483647; create index attr_0_0_musician_id - on sgd0815.\"musician\" using btree(\"id\"); + on "sgd0815"."musician" using btree("id"); create index attr_0_1_musician_name - on sgd0815.\"musician\" using btree(left(\"name\", 256)); + on "sgd0815"."musician" using btree(left("name", 256)); create index attr_0_2_musician_main_band - on sgd0815.\"musician\" using gist(\"main_band\", block_range); + on "sgd0815"."musician" using gist("main_band", block_range); create index attr_0_3_musician_bands - on sgd0815.\"musician\" using gin(\"bands\"); + on "sgd0815"."musician" using gin("bands"); -create table sgd0815.\"band\" ( +create table "sgd0815"."band" ( vid bigserial primary key, block_range int4range not null, - \"id\" text not null, - \"name\" text not null, - \"original_songs\" text[] not null + "id" text not null, + "name" text not null, + "original_songs" text[] not null ); -alter table sgd0815.\"band\" +alter table "sgd0815"."band" add constraint band_id_block_range_excl exclude using gist (id with =, block_range with &&); create index brin_band - on sgd0815.band + on "sgd0815"."band" using brin(lower(block_range), coalesce(upper(block_range), 2147483647), vid); create index band_block_range_closed - on sgd0815.band(coalesce(upper(block_range), 2147483647)) + on "sgd0815"."band"(coalesce(upper(block_range), 2147483647)) where coalesce(upper(block_range), 2147483647) < 2147483647; create index attr_1_0_band_id - on sgd0815.\"band\" using btree(\"id\"); + on "sgd0815"."band" using btree("id"); create index attr_1_1_band_name - on sgd0815.\"band\" using btree(left(\"name\", 256)); + on "sgd0815"."band" using btree(left("name", 256)); create index attr_1_2_band_original_songs - on sgd0815.\"band\" using gin(\"original_songs\"); + on "sgd0815"."band" using gin("original_songs"); -create table sgd0815.\"song\" ( +create table "sgd0815"."song" ( vid bigserial primary key, block$ int not null, - \"id\" text not null, - \"title\" text not null, - \"written_by\" text not null, + "id" text not null, + "title" text not null, + "written_by" text not null, unique(id) ); create index brin_song - on sgd0815.song + on "sgd0815"."song" using brin(block$, vid); create index attr_2_1_song_title - on sgd0815.\"song\" using btree(left(\"title\", 256)); + on "sgd0815"."song" using btree(left("title", 256)); create index attr_2_2_song_written_by - on sgd0815.\"song\" using btree(\"written_by\", block$); + on "sgd0815"."song" using btree("written_by", block$); -create table sgd0815.\"song_stat\" ( +create table "sgd0815"."song_stat" ( vid bigserial primary key, block_range int4range not null, - 
\"id\" text not null, - \"played\" integer not null + "id" text not null, + "played" integer not null ); -alter table sgd0815.\"song_stat\" +alter table "sgd0815"."song_stat" add constraint song_stat_id_block_range_excl exclude using gist (id with =, block_range with &&); create index brin_song_stat - on sgd0815.song_stat + on "sgd0815"."song_stat" using brin(lower(block_range), coalesce(upper(block_range), 2147483647), vid); create index song_stat_block_range_closed - on sgd0815.song_stat(coalesce(upper(block_range), 2147483647)) + on "sgd0815"."song_stat"(coalesce(upper(block_range), 2147483647)) where coalesce(upper(block_range), 2147483647) < 2147483647; create index attr_3_0_song_stat_id - on sgd0815.\"song_stat\" using btree(\"id\"); + on "sgd0815"."song_stat" using btree("id"); create index attr_3_1_song_stat_played - on sgd0815.\"song_stat\" using btree(\"played\"); + on "sgd0815"."song_stat" using btree("played"); -"; +"#; -const FOREST_GQL: &str = " +const FOREST_GQL: &str = r#" interface ForestDweller { id: ID!, forest: Forest @@ -380,84 +416,84 @@ type Animal implements ForestDweller @entity { type Forest @entity { id: ID!, # Array of interfaces as derived reference - dwellers: [ForestDweller!]! @derivedFrom(field: \"forest\") + dwellers: [ForestDweller!]! @derivedFrom(field: "forest") } type Habitat @entity { id: ID!, # Use interface as direct reference most_common: ForestDweller!, dwellers: [ForestDweller!]! -}"; +}"#; -const FOREST_DDL: &str = "create table sgd0815.\"animal\" ( +const FOREST_DDL: &str = r#"create table "sgd0815"."animal" ( vid bigserial primary key, block_range int4range not null, - \"id\" text not null, - \"forest\" text + "id" text not null, + "forest" text ); -alter table sgd0815.\"animal\" +alter table "sgd0815"."animal" add constraint animal_id_block_range_excl exclude using gist (id with =, block_range with &&); create index brin_animal - on sgd0815.animal + on "sgd0815"."animal" using brin(lower(block_range), coalesce(upper(block_range), 2147483647), vid); create index animal_block_range_closed - on sgd0815.animal(coalesce(upper(block_range), 2147483647)) + on "sgd0815"."animal"(coalesce(upper(block_range), 2147483647)) where coalesce(upper(block_range), 2147483647) < 2147483647; create index attr_0_0_animal_id - on sgd0815.\"animal\" using btree(\"id\"); + on "sgd0815"."animal" using btree("id"); create index attr_0_1_animal_forest - on sgd0815.\"animal\" using gist(\"forest\", block_range); + on "sgd0815"."animal" using gist("forest", block_range); -create table sgd0815.\"forest\" ( +create table "sgd0815"."forest" ( vid bigserial primary key, block_range int4range not null, - \"id\" text not null + "id" text not null ); -alter table sgd0815.\"forest\" +alter table "sgd0815"."forest" add constraint forest_id_block_range_excl exclude using gist (id with =, block_range with &&); create index brin_forest - on sgd0815.forest + on "sgd0815"."forest" using brin(lower(block_range), coalesce(upper(block_range), 2147483647), vid); create index forest_block_range_closed - on sgd0815.forest(coalesce(upper(block_range), 2147483647)) + on "sgd0815"."forest"(coalesce(upper(block_range), 2147483647)) where coalesce(upper(block_range), 2147483647) < 2147483647; create index attr_1_0_forest_id - on sgd0815.\"forest\" using btree(\"id\"); + on "sgd0815"."forest" using btree("id"); -create table sgd0815.\"habitat\" ( +create table "sgd0815"."habitat" ( vid bigserial primary key, block_range int4range not null, - \"id\" text not null, - \"most_common\" text not null, - 
\"dwellers\" text[] not null + "id" text not null, + "most_common" text not null, + "dwellers" text[] not null ); -alter table sgd0815.\"habitat\" +alter table "sgd0815"."habitat" add constraint habitat_id_block_range_excl exclude using gist (id with =, block_range with &&); create index brin_habitat - on sgd0815.habitat + on "sgd0815"."habitat" using brin(lower(block_range), coalesce(upper(block_range), 2147483647), vid); create index habitat_block_range_closed - on sgd0815.habitat(coalesce(upper(block_range), 2147483647)) + on "sgd0815"."habitat"(coalesce(upper(block_range), 2147483647)) where coalesce(upper(block_range), 2147483647) < 2147483647; create index attr_2_0_habitat_id - on sgd0815.\"habitat\" using btree(\"id\"); + on "sgd0815"."habitat" using btree("id"); create index attr_2_1_habitat_most_common - on sgd0815.\"habitat\" using gist(\"most_common\", block_range); + on "sgd0815"."habitat" using gist("most_common", block_range); create index attr_2_2_habitat_dwellers - on sgd0815.\"habitat\" using gin(\"dwellers\"); + on "sgd0815"."habitat" using gin("dwellers"); -"; -const FULLTEXT_GQL: &str = " +"#; +const FULLTEXT_GQL: &str = r#" type _Schema_ @fulltext( - name: \"search\" + name: "search" language: en algorithm: rank - include: [\ + include: [ { - entity: \"Animal\", + entity: "Animal", fields: [ - {name: \"name\"}, - {name: \"species\"} + {name: "name"}, + {name: "species"} ] } ] @@ -470,84 +506,84 @@ type Animal @entity { } type Forest @entity { id: ID!, - dwellers: [Animal!]! @derivedFrom(field: \"forest\") + dwellers: [Animal!]! @derivedFrom(field: "forest") } type Habitat @entity { id: ID!, most_common: Animal!, dwellers: [Animal!]! -}"; +}"#; -const FULLTEXT_DDL: &str = "create table sgd0815.\"animal\" ( +const FULLTEXT_DDL: &str = r#"create table "sgd0815"."animal" ( vid bigserial primary key, block_range int4range not null, - \"id\" text not null, - \"name\" text not null, - \"species\" text not null, - \"forest\" text, - \"search\" tsvector + "id" text not null, + "name" text not null, + "species" text not null, + "forest" text, + "search" tsvector ); -alter table sgd0815.\"animal\" +alter table "sgd0815"."animal" add constraint animal_id_block_range_excl exclude using gist (id with =, block_range with &&); create index brin_animal - on sgd0815.animal + on "sgd0815"."animal" using brin(lower(block_range), coalesce(upper(block_range), 2147483647), vid); create index animal_block_range_closed - on sgd0815.animal(coalesce(upper(block_range), 2147483647)) + on "sgd0815"."animal"(coalesce(upper(block_range), 2147483647)) where coalesce(upper(block_range), 2147483647) < 2147483647; create index attr_0_0_animal_id - on sgd0815.\"animal\" using btree(\"id\"); + on "sgd0815"."animal" using btree("id"); create index attr_0_1_animal_name - on sgd0815.\"animal\" using btree(left(\"name\", 256)); + on "sgd0815"."animal" using btree(left("name", 256)); create index attr_0_2_animal_species - on sgd0815.\"animal\" using btree(left(\"species\", 256)); + on "sgd0815"."animal" using btree(left("species", 256)); create index attr_0_3_animal_forest - on sgd0815.\"animal\" using gist(\"forest\", block_range); + on "sgd0815"."animal" using gist("forest", block_range); create index attr_0_4_animal_search - on sgd0815.\"animal\" using gin(\"search\"); + on "sgd0815"."animal" using gin("search"); -create table sgd0815.\"forest\" ( +create table "sgd0815"."forest" ( vid bigserial primary key, block_range int4range not null, - \"id\" text not null + "id" text not null ); -alter table 
sgd0815.\"forest\" +alter table "sgd0815"."forest" add constraint forest_id_block_range_excl exclude using gist (id with =, block_range with &&); create index brin_forest - on sgd0815.forest + on "sgd0815"."forest" using brin(lower(block_range), coalesce(upper(block_range), 2147483647), vid); create index forest_block_range_closed - on sgd0815.forest(coalesce(upper(block_range), 2147483647)) + on "sgd0815"."forest"(coalesce(upper(block_range), 2147483647)) where coalesce(upper(block_range), 2147483647) < 2147483647; create index attr_1_0_forest_id - on sgd0815.\"forest\" using btree(\"id\"); + on "sgd0815"."forest" using btree("id"); -create table sgd0815.\"habitat\" ( +create table "sgd0815"."habitat" ( vid bigserial primary key, block_range int4range not null, - \"id\" text not null, - \"most_common\" text not null, - \"dwellers\" text[] not null + "id" text not null, + "most_common" text not null, + "dwellers" text[] not null ); -alter table sgd0815.\"habitat\" +alter table "sgd0815"."habitat" add constraint habitat_id_block_range_excl exclude using gist (id with =, block_range with &&); create index brin_habitat - on sgd0815.habitat + on "sgd0815"."habitat" using brin(lower(block_range), coalesce(upper(block_range), 2147483647), vid); create index habitat_block_range_closed - on sgd0815.habitat(coalesce(upper(block_range), 2147483647)) + on "sgd0815"."habitat"(coalesce(upper(block_range), 2147483647)) where coalesce(upper(block_range), 2147483647) < 2147483647; create index attr_2_0_habitat_id - on sgd0815.\"habitat\" using btree(\"id\"); + on "sgd0815"."habitat" using btree("id"); create index attr_2_1_habitat_most_common - on sgd0815.\"habitat\" using gist(\"most_common\", block_range); + on "sgd0815"."habitat" using gist("most_common", block_range); create index attr_2_2_habitat_dwellers - on sgd0815.\"habitat\" using gin(\"dwellers\"); + on "sgd0815"."habitat" using gin("dwellers"); -"; +"#; -const FORWARD_ENUM_GQL: &str = " +const FORWARD_ENUM_GQL: &str = r#" type Thing @entity { id: ID!, orientation: Orientation! 
@@ -556,27 +592,27 @@ type Thing @entity {
 enum Orientation { UP, DOWN }
-";
+"#;
 
-const FORWARD_ENUM_SQL: &str = "create type sgd0815.\"orientation\"
-    as enum (\'DOWN\', \'UP\');
-create table sgd0815.\"thing\" (
+const FORWARD_ENUM_SQL: &str = r#"create type sgd0815."orientation"
+    as enum ('DOWN', 'UP');
+create table "sgd0815"."thing" (
         vid                  bigserial primary key,
         block_range          int4range not null,
-        \"id\"                 text not null,
-        \"orientation\"        \"sgd0815\".\"orientation\" not null
+        "id"                 text not null,
+        "orientation"        "sgd0815"."orientation" not null
 );
-alter table sgd0815.\"thing\"
+alter table "sgd0815"."thing"
   add constraint thing_id_block_range_excl exclude using gist (id with =, block_range with &&);
 create index brin_thing
-    on sgd0815.thing
+    on "sgd0815"."thing"
  using brin(lower(block_range), coalesce(upper(block_range), 2147483647), vid);
 create index thing_block_range_closed
-    on sgd0815.thing(coalesce(upper(block_range), 2147483647))
+    on "sgd0815"."thing"(coalesce(upper(block_range), 2147483647))
  where coalesce(upper(block_range), 2147483647) < 2147483647;
 create index attr_0_0_thing_id
-    on sgd0815.\"thing\" using btree(\"id\");
+    on "sgd0815"."thing" using btree("id");
 create index attr_0_1_thing_orientation
-    on sgd0815.\"thing\" using btree(\"orientation\");
+    on "sgd0815"."thing" using btree("orientation");
 
-";
+"#;
diff --git a/store/postgres/src/relational/index.rs b/store/postgres/src/relational/index.rs
new file mode 100644
index 00000000000..a1a4e27d7de
--- /dev/null
+++ b/store/postgres/src/relational/index.rs
@@ -0,0 +1,845 @@
+//! Parse Postgres index definition into a form that is meaningful for us.
+use std::fmt::{Display, Write};
+
+use graph::itertools::Itertools;
+use graph::prelude::{
+    lazy_static,
+    regex::{Captures, Regex},
+    BlockNumber,
+};
+
+use crate::block_range::{BLOCK_COLUMN, BLOCK_RANGE_COLUMN};
+use crate::relational::{BYTE_ARRAY_PREFIX_SIZE, STRING_PREFIX_SIZE};
+
+use super::VID_COLUMN;
+
+#[derive(Debug, PartialEq)]
+pub enum Method {
+    Brin,
+    BTree,
+    Gin,
+    Gist,
+    Unknown(String),
+}
+
+impl Display for Method {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        use Method::*;
+        match self {
+            Brin => write!(f, "brin")?,
+            BTree => write!(f, "btree")?,
+            Gin => write!(f, "gin")?,
+            Gist => write!(f, "gist")?,
+            Unknown(s) => write!(f, "{s}")?,
+        }
+        Ok(())
+    }
+}
+
+impl Method {
+    fn parse(method: String) -> Self {
+        method.parse().unwrap_or_else(|()| Method::Unknown(method))
+    }
+}
+
+impl std::str::FromStr for Method {
+    type Err = ();
+
+    fn from_str(method: &str) -> Result<Self, Self::Err> {
+        use Method::*;
+
+        match method {
+            "brin" => Ok(Brin),
+            "btree" => Ok(BTree),
+            "gin" => Ok(Gin),
+            "gist" => Ok(Gist),
+            _ => Err(()),
+        }
+    }
+}
+
+#[derive(Clone, Debug, PartialEq)]
+pub enum PrefixKind {
+    Left,
+    Substring,
+}
+
+impl PrefixKind {
+    fn parse(kind: &str) -> Option<Self> {
+        use PrefixKind::*;
+
+        match kind {
+            "substring" => Some(Substring),
+            "left" => Some(Left),
+            _ => None,
+        }
+    }
+
+    fn to_sql(&self, name: &str) -> String {
+        use PrefixKind::*;
+
+        match self {
+            Left => format!("left({name}, {})", STRING_PREFIX_SIZE),
+            Substring => format!("substring({name}, 1, {})", BYTE_ARRAY_PREFIX_SIZE),
+        }
+    }
+}
+
+/// An index expression, i.e., a 'column' in an index
+#[derive(Clone, Debug, PartialEq)]
+pub enum Expr {
+    /// A named column; only user-defined columns appear here
+    Column(String),
+    /// A prefix of a named column, used for indexes on `text` and `bytea`
+    Prefix(String, PrefixKind),
+    /// The `vid` column
+    Vid,
+    /// The `block$` column
+    Block,
+    /// The `block_range` column
+    BlockRange,
+    /// The expression `lower(block_range)`
+    BlockRangeLower,
+    /// The expression `coalesce(upper(block_range), 2147483647)`
+    BlockRangeUpper,
+    /// The literal index expression since none of the previous options
+    /// matched
+    Unknown(String),
+}
+
+impl Display for Expr {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match self {
+            Expr::Column(s) => write!(f, "{s}")?,
+            Expr::Prefix(s, _) => write!(f, "{s}")?,
+            Expr::Vid => write!(f, "vid")?,
+            Expr::Block => write!(f, "block")?,
+            Expr::BlockRange => write!(f, "block_range")?,
+            Expr::BlockRangeLower => write!(f, "lower(block_range)")?,
+            Expr::BlockRangeUpper => write!(f, "upper(block_range)")?,
+            Expr::Unknown(e) => write!(f, "{e}")?,
+        }
+        Ok(())
+    }
+}
+
+impl Expr {
+    fn parse(expr: &str) -> Self {
+        use Expr::*;
+
+        let expr = expr.trim().to_string();
+
+        let prefix_rx = Regex::new("^(?P<kind>substring|left)\\((?P<name>[a-z0-9$_]+)").unwrap();
+
+        if expr == VID_COLUMN {
+            Vid
+        } else if expr == "lower(block_range)" {
+            BlockRangeLower
+        } else if expr == "coalesce(upper(block_range), 2147483647)" {
+            BlockRangeUpper
+        } else if expr == "block_range" {
+            BlockRange
+        } else if expr == "block$" {
+            Block
+        } else if expr
+            .chars()
+            .all(|c| c.is_ascii_alphanumeric() || c == '$' || c == '_')
+        {
+            Column(expr)
+        } else if let Some(caps) = prefix_rx.captures(&expr) {
+            if let Some(name) = caps.name("name") {
+                let kind = caps
+                    .name("kind")
+                    .and_then(|op| PrefixKind::parse(op.as_str()));
+                match kind {
+                    Some(kind) => Prefix(name.as_str().to_string(), kind),
+                    None => Unknown(expr),
+                }
+            } else {
+                Unknown(expr)
+            }
+        } else {
+            Unknown(expr)
+        }
+    }
+
+    fn is_attribute(&self) -> bool {
+        use Expr::*;
+
+        match self {
+            Column(_) | Prefix(_, _) => true,
+            Vid | Block | BlockRange | BlockRangeLower | BlockRangeUpper | Unknown(_) => false,
+        }
+    }
+
+    fn is_id(&self) -> bool {
+        use Expr::*;
+        match self {
+            Column(s) => s == "id",
+            _ => false,
+        }
+    }
+
+    fn to_sql(&self) -> String {
+        match self {
+            Expr::Column(name) => name.to_string(),
+            Expr::Prefix(name, kind) => kind.to_sql(name),
+            Expr::Vid => VID_COLUMN.to_string(),
+            Expr::Block => BLOCK_COLUMN.to_string(),
+            Expr::BlockRange => BLOCK_RANGE_COLUMN.to_string(),
+            Expr::BlockRangeLower => "lower(block_range)".to_string(),
+            Expr::BlockRangeUpper => "coalesce(upper(block_range), 2147483647)".to_string(),
+            Expr::Unknown(expr) => expr.to_string(),
+        }
+    }
+}
+
+/// The condition for a partial index, i.e., the statement after `where ..`
+/// in a `create index` statement
+#[derive(Debug, PartialEq)]
+pub enum Cond {
+    /// The expression `coalesce(upper(block_range), 2147483647) > $number`
+    Partial(BlockNumber),
+    /// The expression `coalesce(upper(block_range), 2147483647) < 2147483647`
+    Closed,
+    /// Any other expression
+    Unknown(String),
+}
+
+impl Display for Cond {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        use Cond::*;
+
+        match self {
+            Partial(number) => write!(f, "upper(block_range) > {number}"),
+            Closed => write!(f, "closed(block_range)"),
+            Unknown(s) => write!(f, "{s}"),
+        }
+    }
+}
+
+impl Cond {
+    fn parse(cond: String) -> Self {
+        fn parse_partial(cond: &str) -> Option<Cond> {
+            let cond_rx =
+                Regex::new("coalesce\\(upper\\(block_range\\), 2147483647\\) > (?P<number>[0-9]+)")
+                    .unwrap();
+
+            let caps = cond_rx.captures(cond)?;
+            caps.name("number")
+                .map(|number| number.as_str())
+                .and_then(|number| number.parse::<BlockNumber>().ok())
+                .map(|number| Cond::Partial(number))
+        }
+
+        if &cond == "coalesce(upper(block_range), 2147483647) < 2147483647" {
+            Cond::Closed
+        } else {
+            parse_partial(&cond).unwrap_or_else(|| Cond::Unknown(cond))
+        }
+    }
+
+    fn to_sql(&self) -> String {
+        match self {
+            Cond::Partial(number) => format!("coalesce(upper(block_range), 2147483647) > {number}"),
+            Cond::Closed => "coalesce(upper(block_range), 2147483647) < 2147483647".to_string(),
+            Cond::Unknown(cond) => cond.to_string(),
+        }
+    }
+}
+
+#[derive(Debug, PartialEq)]
+pub enum CreateIndex {
+    /// The literal index definition passed to `parse`. This is used when we
+    /// can't parse a `create index` statement, e.g. because it uses
+    /// features we don't care about.
+    Unknown { defn: String },
+    /// Representation of a `create index` statement that we successfully
+    /// parsed.
+    Parsed {
+        /// Is this a `unique` index
+        unique: bool,
+        /// The name of the index
+        name: String,
+        /// The namespace of the table to which this index belongs
+        nsp: String,
+        /// The name of the table to which this index belongs
+        table: String,
+        /// The index method
+        method: Method,
+        /// The columns (or more generally expressions) that are indexed
+        columns: Vec<Expr>,
+        /// The condition for partial indexes
+        cond: Option<Cond>,
+        /// Storage parameters for the index
+        with: Option<String>,
+    },
+}
+
+impl Display for CreateIndex {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        use CreateIndex::*;
+
+        match self {
+            Unknown { defn } => {
+                writeln!(f, "{defn}")?;
+            }
+            Parsed {
+                unique,
+                name,
+                nsp: _,
+                table: _,
+                method,
+                columns,
+                cond,
+                with,
+            } => {
+                let columns = columns.into_iter().map(|c| c.to_string()).join(", ");
+                let unique = if *unique { "[uq]" } else { "" };
+                write!(f, "{name}{unique} {method}({columns})")?;
+                if let Some(cond) = cond {
+                    write!(f, " where {cond}")?;
+                }
+                if let Some(with) = with {
+                    write!(f, " with {with}")?;
+                }
+                writeln!(f, "")?;
+            }
+        }
+        Ok(())
+    }
+}
+
+impl CreateIndex {
+    /// Parse a `create index` statement. We are mostly concerned with
+    /// parsing indexes that `graph-node` created. If we can't parse an
+    /// index definition, it is returned as `CreateIndex::Unknown`.
+    ///
+    /// The `defn` should be formatted the way it is formatted in Postgres'
+    /// `pg_indexes.indexdef` system catalog; it's likely that deviating
+    /// from that formatting will make the index definition not parse
+    /// properly and return a `CreateIndex::Unknown`.
+    pub fn parse(mut defn: String) -> Self {
+        fn field(cap: &Captures, name: &str) -> Option<String> {
+            cap.name(name).map(|mtch| mtch.as_str().to_string())
+        }
+
+        fn split_columns(s: &str) -> Vec<Expr> {
+            let mut parens = 0;
+            let mut column = String::new();
+            let mut columns = Vec::new();
+
+            for c in s.chars() {
+                match c {
+                    '"' => { /* strip double quotes */ }
+                    '(' => {
+                        parens += 1;
+                        column.push(c);
+                    }
+                    ')' => {
+                        parens -= 1;
+                        column.push(c);
+                    }
+                    ',' if parens == 0 => {
+                        columns.push(Expr::parse(&column));
+                        column = String::new();
+                    }
+                    _ => column.push(c),
+                }
+            }
+            columns.push(Expr::parse(&column));
+
+            columns
+        }
+
+        fn new_parsed(defn: &str) -> Option<CreateIndex> {
+            let rx = Regex::new(
+                "create (?P<unique>unique )?index (?P<name>[a-z0-9$_]+) \
+                 on (?P<nsp>sgd[0-9]+)\\.(?P<table>[a-z$_]+) \
+                 using (?P<method>[a-z]+) \\((?P<columns>.*?)\\)\
+                 ( where \\((?P<cond>.*)\\))?\
+                 ( with \\((?P<with>.*)\\))?$",
+            )
+            .unwrap();
+
+            let cap = rx.captures(&defn)?;
+            let unique = cap.name("unique").is_some();
+            let name = field(&cap, "name")?;
+            let nsp = field(&cap, "nsp")?;
+            let table = field(&cap, "table")?;
+            let columns = field(&cap, "columns")?;
+            let method = Method::parse(field(&cap, "method")?);
+            let cond = field(&cap, "cond").map(Cond::parse);
+            let with = field(&cap, "with");
+
+            let columns = split_columns(&columns);
+            Some(CreateIndex::Parsed {
+                unique,
+                name,
+                nsp,
+                table,
+                method,
+                columns,
+                cond,
+                with,
+            })
+        }
+
+        defn.make_ascii_lowercase();
+        new_parsed(&defn).unwrap_or_else(|| CreateIndex::Unknown { defn })
+    }
+
+    pub fn create<C: Into<Vec<Expr>>>(
+        name: &str,
+        nsp: &str,
+        table: &str,
+        unique: bool,
+        method: Method,
+        columns: C,
+        cond: Option<Cond>,
+        with: Option<String>,
+    ) -> Self {
+        CreateIndex::Parsed {
+            unique,
+            name: name.to_string(),
+            nsp: nsp.to_string(),
+            table: table.to_string(),
+            method,
+            columns: columns.into(),
+            cond,
+            with,
+        }
+    }
+
+    pub fn is_attribute_index(&self) -> bool {
+        use CreateIndex::*;
+        match self {
+            Unknown { defn: _ } => false,
+            Parsed {
+                columns,
+                cond,
+                with,
+                method,
+                ..
+            } => {
+                if cond.is_some() || with.is_some() {
+                    return false;
+                }
+                match method {
+                    Method::Gist => {
+                        columns.len() == 2
+                            && columns[0].is_attribute()
+                            && !columns[0].is_id()
+                            && columns[1] == Expr::BlockRange
+                    }
+                    Method::Brin => false,
+                    Method::BTree | Method::Gin => {
+                        columns.len() == 1
+                            && columns[0].is_attribute()
+                            && cond.is_none()
+                            && with.is_none()
+                    }
+                    Method::Unknown(_) => false,
+                }
+            }
+        }
+    }
+
+    /// Return `true` if `self` is one of the indexes we create by default
+    pub fn is_default_index(&self) -> bool {
+        lazy_static! {
+            static ref DEFAULT_INDEXES: Vec<CreateIndex> = {
+                fn dummy(
+                    unique: bool,
+                    method: Method,
+                    columns: &[Expr],
+                    cond: Option<Cond>,
+                ) -> CreateIndex {
+                    CreateIndex::create(
+                        "dummy_index",
+                        "dummy_nsp",
+                        "dummy_table",
+                        unique,
+                        method,
+                        columns,
+                        cond,
+                        None,
+                    )
+                }
+                use Method::*;
+
+                vec![
+                    dummy(
+                        false,
+                        Brin,
+                        &[Expr::BlockRangeLower, Expr::BlockRangeUpper, Expr::Vid],
+                        None,
+                    ),
+                    dummy(true, BTree, &[Expr::Vid], None),
+                    dummy(
+                        false,
+                        Gist,
+                        &[Expr::Column("id".to_string()), Expr::BlockRange],
+                        None,
+                    ),
+                    dummy(false, BTree, &[Expr::BlockRangeUpper], Some(Cond::Closed)),
+                ]
+            };
+        }
+
+        self.is_attribute_index() || DEFAULT_INDEXES.iter().any(|idx| self.is_same_index(idx))
+    }
+
+    fn is_same_index(&self, other: &CreateIndex) -> bool {
+        match (self, other) {
+            (CreateIndex::Unknown { .. }, _) | (_, CreateIndex::Unknown { .. }) => false,
+            (
+                CreateIndex::Parsed {
+                    unique,
+                    name: _,
+                    nsp: _,
+                    table: _,
+                    method,
+                    columns,
+                    cond,
+                    with,
+                },
+                CreateIndex::Parsed {
+                    unique: o_unique,
+                    name: _,
+                    nsp: _,
+                    table: _,
+                    method: o_method,
+                    columns: o_columns,
+                    cond: o_cond,
+                    with: o_with,
+                },
+            ) => {
+                unique == o_unique
+                    && method == o_method
+                    && columns == o_columns
+                    && cond == o_cond
+                    && with == o_with
+            }
+        }
+    }
+
+    /// Generate a SQL statement that creates this index. If `concurrent` is
+    /// `true`, make it a concurrent index creation. If `if_not_exists` is
+    /// `true`, add an `if not exists` clause to the index creation.
+    pub fn to_sql(&self, concurrent: bool, if_not_exists: bool) -> Result<String, std::fmt::Error> {
+        match self {
+            CreateIndex::Unknown { defn } => Ok(defn.to_string()),
+            CreateIndex::Parsed {
+                unique,
+                name,
+                nsp,
+                table,
+                method,
+                columns,
+                cond,
+                with,
+            } => {
+                let unique = if *unique { "unique " } else { "" };
+                let concurrent = if concurrent { "concurrently " } else { "" };
+                let if_not_exists = if if_not_exists { "if not exists " } else { "" };
+                let columns = columns.into_iter().map(|c| c.to_sql()).join(", ");
+
+                let mut sql = format!("create {unique}index {concurrent}{if_not_exists}{name} on {nsp}.{table} using {method} ({columns})");
+                if let Some(with) = with {
+                    write!(sql, " with ({with})")?;
+                }
+                if let Some(cond) = cond {
+                    write!(sql, " where ({})", cond.to_sql())?;
+                }
+                Ok(sql)
+            }
+        }
+    }
+}
+
+#[test]
+fn parse() {
+    use Method::*;
+
+    #[derive(Debug)]
+    enum TestExpr {
+        Name(&'static str),
+        Prefix(&'static str, &'static str),
+        Vid,
+        Block,
+        BlockRange,
+        BlockRangeLower,
+        BlockRangeUpper,
+        #[allow(dead_code)]
+        Unknown(&'static str),
+    }
+
+    impl<'a> From<&'a TestExpr> for Expr {
+        fn from(expr: &'a TestExpr) -> Self {
+            match expr {
+                TestExpr::Name(name) => Expr::Column(name.to_string()),
+                TestExpr::Prefix(name, kind) => {
+                    Expr::Prefix(name.to_string(), PrefixKind::parse(kind).unwrap())
+                }
+                TestExpr::Vid => Expr::Vid,
+                TestExpr::Block => Expr::Block,
+                TestExpr::BlockRange => Expr::BlockRange,
+                TestExpr::BlockRangeLower => Expr::BlockRangeLower,
+                TestExpr::BlockRangeUpper => Expr::BlockRangeUpper,
+                TestExpr::Unknown(s) => Expr::Unknown(s.to_string()),
+            }
+        }
+    }
+
+    #[derive(Debug)]
+    enum TestCond {
+        Partial(BlockNumber),
+        Closed,
+        Unknown(&'static str),
+    }
+
+    impl From<TestCond> for Cond {
+        fn from(expr: TestCond) -> Self {
+            match expr {
+                TestCond::Partial(number) => Cond::Partial(number),
+                TestCond::Unknown(s) => Cond::Unknown(s.to_string()),
+                TestCond::Closed => Cond::Closed,
+            }
+        }
+    }
+
+    #[derive(Debug)]
+    struct Parsed {
+        unique: bool,
+        name: &'static str,
+        nsp: &'static str,
+        table: &'static str,
+        method: Method,
+        columns: &'static [TestExpr],
+        cond: Option<TestCond>,
+    }
+
+    impl From<Parsed> for CreateIndex {
+        fn from(p: Parsed) -> Self {
+            let Parsed {
+                unique,
+                name,
+                nsp,
+                table,
+                method,
+                columns,
+                cond,
+            } = p;
+            let columns: Vec<_> = columns.into_iter().map(|c| Expr::from(c)).collect();
+            let cond = cond.map(Cond::from);
+            CreateIndex::Parsed {
+                unique,
+                name: name.to_string(),
+                nsp: nsp.to_string(),
+                table: table.to_string(),
+                method,
+                columns,
+                cond,
+                with: None,
+            }
+        }
+    }
+
+    #[track_caller]
+    fn parse_one(defn: &str, exp: Parsed) {
+        let act = CreateIndex::parse(defn.to_string());
+        let exp = CreateIndex::from(exp);
+        assert_eq!(exp, act);
+
+        let defn = defn.replace("\"", "").to_ascii_lowercase();
+        assert_eq!(defn, act.to_sql(false, false).unwrap());
+    }
+
+    use TestCond::*;
+    use TestExpr::*;
+
+    let sql = "create index attr_1_0_token_id on sgd44.token using btree (id)";
+    let exp = Parsed {
+        unique: false,
+        name: "attr_1_0_token_id",
+        nsp: "sgd44",
+        table: "token",
+        method: BTree,
+        columns: &[Name("id")],
+        cond: None,
+    };
+    parse_one(sql, exp);
+
+    let sql =
+        "create index attr_1_1_token_symbol on sgd44.token using btree (\"left\"(symbol, 256))";
+    let exp = Parsed {
+        unique: false,
+        name: "attr_1_1_token_symbol",
+        nsp: "sgd44",
+        table: "token",
+        method: BTree,
+        columns: &[Prefix("symbol", "left")],
+        cond: None,
+    };
+    parse_one(sql, exp);
+
+    let sql = "create index attr_1_5_token_trade_volume on sgd44.token using btree (trade_volume)";
+    let exp = Parsed {
+        unique: false,
+        name: "attr_1_5_token_trade_volume",
+        nsp: "sgd44",
+        table: "token",
+        method: BTree,
+        columns: &[Name("trade_volume")],
+        cond: None,
+    };
+    parse_one(sql, exp);
+
+    let sql = "create unique index token_pkey on sgd44.token using btree (vid)";
+    let exp = Parsed {
+        unique: true,
+        name: "token_pkey",
+        nsp: "sgd44",
+        table: "token",
+        method: BTree,
+        columns: &[Vid],
+        cond: None,
+    };
+    parse_one(sql, exp);
+
+    let sql = "create index brin_token on sgd44.token using brin (lower(block_range), coalesce(upper(block_range), 2147483647), vid)";
+    let exp = Parsed {
+        unique: false,
+        name: "brin_token",
+        nsp: "sgd44",
+        table: "token",
+        method: Brin,
+        columns: &[BlockRangeLower, BlockRangeUpper, Vid],
+        cond: None,
+    };
+    parse_one(sql, exp);
+
+    let sql = "create index token_block_range_closed on sgd44.token using btree (coalesce(upper(block_range), 2147483647)) where (coalesce(upper(block_range), 2147483647) < 2147483647)";
+    let exp = Parsed {
+        unique: false,
+        name: "token_block_range_closed",
+        nsp: "sgd44",
+        table: "token",
+        method: BTree,
+        columns: &[BlockRangeUpper],
+        cond: Some(Closed),
+    };
+    parse_one(sql, exp);
+
+    let sql = "create index token_id_block_range_excl on sgd44.token using gist (id, block_range)";
+    let exp = Parsed {
+        unique: false,
+        name: "token_id_block_range_excl",
+        nsp: "sgd44",
+        table: "token",
+        method: Gist,
+        columns: &[Name("id"), BlockRange],
+        cond: None,
+    };
+    parse_one(sql, exp);
+
+    let sql="create index attr_1_11_pool_owner on sgd411585.pool using btree (\"substring\"(owner, 1, 64))";
+    let exp = Parsed {
+        unique: false,
+        name: "attr_1_11_pool_owner",
+        nsp: "sgd411585",
+        table: "pool",
+        method: BTree,
+        columns: &[Prefix("owner", "substring")],
+        cond: None,
+    };
+    parse_one(sql, exp);
+
+    let sql =
+        "create index attr_1_20_pool_vault_id on sgd411585.pool using gist (vault_id, block_range)";
+    let exp = Parsed {
+        unique: false,
+        name: "attr_1_20_pool_vault_id",
+        nsp: "sgd411585",
+        table: "pool",
+        method: Gist,
+        columns: &[Name("vault_id"), BlockRange],
+        cond: None,
+    };
+    parse_one(sql, exp);
+
+    let sql = "create index attr_1_22_pool_tokens_list on sgd411585.pool using gin (tokens_list)";
+    let exp = Parsed {
+        unique: false,
+        name: "attr_1_22_pool_tokens_list",
+        nsp: "sgd411585",
+        table: "pool",
+        method: Gin,
+        columns: &[Name("tokens_list")],
+        cond: None,
+    };
+    parse_one(sql, exp);
+
+    let sql = "create index manual_partial_pool_total_liquidity on sgd411585.pool using btree (total_liquidity) where (coalesce(upper(block_range), 2147483647) > 15635000)";
+    let exp = Parsed {
+        unique: false,
+        name: "manual_partial_pool_total_liquidity",
+        nsp: "sgd411585",
+        table: "pool",
+        method: BTree,
+        columns: &[Name("total_liquidity")],
+        cond: Some(Partial(15635000)),
+    };
+    parse_one(sql, exp);
+
+    let sql = "create index manual_swap_pool_timestamp_id on sgd217942.swap using btree (pool, \"timestamp\", id)";
+    let exp = Parsed {
+        unique: false,
+        name: "manual_swap_pool_timestamp_id",
+        nsp: "sgd217942",
+        table: "swap",
+        method: BTree,
+        columns: &[Name("pool"), Name("timestamp"), Name("id")],
+        cond: None,
+    };
+    parse_one(sql, exp);
+
+    let sql = "CREATE INDEX brin_scy ON sgd314614.scy USING brin (\"block$\", vid)";
+    let exp = Parsed {
+        unique: false,
+        name: "brin_scy",
+        nsp: "sgd314614",
+        table: "scy",
+        method: Brin,
+        columns: &[Block, Vid],
+        cond: None,
+    };
+    parse_one(sql, exp);
+
+    let sql =
+        "CREATE INDEX brin_scy ON sgd314614.scy USING brin (\"block$\", vid) where (amount > 0)";
USING brin (\"block$\", vid) where (amount > 0)"; + let exp = Parsed { + unique: false, + name: "brin_scy", + nsp: "sgd314614", + table: "scy", + method: Brin, + columns: &[Block, Vid], + cond: Some(TestCond::Unknown("amount > 0")), + }; + parse_one(sql, exp); + + let sql = + "CREATE INDEX manual_token_random_cond ON sgd44.token USING btree (decimals) WHERE (decimals > (5)::numeric)"; + let exp = Parsed { + unique: false, + name: "manual_token_random_cond", + nsp: "sgd44", + table: "token", + method: BTree, + columns: &[Name("decimals")], + cond: Some(TestCond::Unknown("decimals > (5)::numeric")), + }; + parse_one(sql, exp); +} diff --git a/store/postgres/src/relational/prune.rs b/store/postgres/src/relational/prune.rs new file mode 100644 index 00000000000..2fd6f66f6ff --- /dev/null +++ b/store/postgres/src/relational/prune.rs @@ -0,0 +1,404 @@ +use std::{fmt::Write, sync::Arc, time::Instant}; + +use diesel::{ + connection::SimpleConnection, + sql_query, + sql_types::{BigInt, Integer}, + Connection, PgConnection, RunQueryDsl, +}; +use graph::{ + components::store::PruneReporter, + prelude::{BlockNumber, CancelHandle, CancelToken, CancelableError, CheapClone, StoreError}, + slog::{warn, Logger}, +}; +use itertools::Itertools; + +use crate::{ + catalog, + copy::AdaptiveBatchSize, + deployment, + relational::{Table, VID_COLUMN}, +}; + +use super::{Layout, Namespace}; + +/// Utility to copy relevant data out of a source table and into a new +/// destination table and replace the source table with the destination +/// table +struct TablePair { + // The original unpruned table + src: Arc
+    // The temporary table to which we copy the data we'd like to keep. It
+    // has the same name as `src` but is in a different namespace
+    dst: Arc<Table>,
+    src_nsp: Namespace,
+    dst_nsp: Namespace,
+}
+
+impl TablePair {
+    /// Create a `TablePair` for `src`. This creates a new table `dst` with
+    /// the same structure as the `src` table in the database, but in a
+    /// different namespace so that the names of indexes etc. don't clash
+    fn create(
+        conn: &PgConnection,
+        src: Arc<Table>,
+        src_nsp: Namespace,
+        dst_nsp: Namespace,
+    ) -> Result<TablePair, StoreError> {
+        let dst = src.new_like(&dst_nsp, &src.name);
+
+        let mut query = String::new();
+        if catalog::table_exists(conn, dst_nsp.as_str(), &dst.name)? {
+            writeln!(query, "truncate table {};", dst.qualified_name)?;
+        } else {
+            dst.as_ddl(&mut query)?;
+        }
+        conn.batch_execute(&query)?;
+
+        Ok(TablePair {
+            src,
+            dst,
+            src_nsp,
+            dst_nsp,
+        })
+    }
+
+    /// Copy all entity versions visible between `earliest_block` and
+    /// `final_block` in batches, where each batch is a separate
+    /// transaction. Write activity for nonfinal blocks can happen
+    /// concurrently to this copy
+    fn copy_final_entities(
+        &self,
+        conn: &PgConnection,
+        reporter: &mut dyn PruneReporter,
+        earliest_block: BlockNumber,
+        final_block: BlockNumber,
+        cancel: &CancelHandle,
+    ) -> Result<usize, CancelableError<StoreError>> {
+        let column_list = self.column_list();
+
+        // Determine the last vid that we need to copy
+        let VidRange { min_vid, max_vid } = sql_query(&format!(
+            "select coalesce(min(vid), 0) as min_vid, \
+                    coalesce(max(vid), -1) as max_vid from {src} \
+              where lower(block_range) <= $2 \
+                and coalesce(upper(block_range), 2147483647) > $1 \
+                and coalesce(upper(block_range), 2147483647) <= $2 \
+                and block_range && int4range($1, $2, '[]')",
+            src = self.src.qualified_name,
+        ))
+        .bind::<Integer, _>(earliest_block)
+        .bind::<Integer, _>(final_block)
+        .get_result::<VidRange>(conn)?;
+
+        let mut batch_size = AdaptiveBatchSize::new(&self.src);
+        // The first vid we still need to copy
+        let mut next_vid = min_vid;
+        let mut total_rows: usize = 0;
+        while next_vid <= max_vid {
+            let start = Instant::now();
+            let rows = conn.transaction(|| {
+                // Page through all rows in `src` in batches of `batch_size`
+                // and copy the ones that are visible to queries at block
+                // heights between `earliest_block` and `final_block`, but
+                // whose block_range does not extend past `final_block`
+                // since they could still be reverted while we copy.
+                // The conditions on `block_range` are expressed redundantly
+                // to make more indexes usable
+                sql_query(&format!(
+                    "insert into {dst}({column_list}) \
+                     select {column_list} from {src} \
+                      where lower(block_range) <= $2 \
+                        and coalesce(upper(block_range), 2147483647) > $1 \
+                        and coalesce(upper(block_range), 2147483647) <= $2 \
+                        and block_range && int4range($1, $2, '[]') \
+                        and vid >= $3 and vid < $3 + $4 \
+                      order by vid",
+                    src = self.src.qualified_name,
+                    dst = self.dst.qualified_name
+                ))
+                .bind::<Integer, _>(earliest_block)
+                .bind::<Integer, _>(final_block)
+                .bind::<BigInt, _>(next_vid)
+                .bind::<BigInt, _>(&batch_size)
+                .execute(conn)
+            })?;
+            cancel.check_cancel()?;
+
+            total_rows += rows;
+            next_vid += batch_size.size;
+
+            batch_size.adapt(start.elapsed());
+
+            reporter.copy_final_batch(
+                self.src.name.as_str(),
+                rows as usize,
+                total_rows,
+                next_vid > max_vid,
+            );
+        }
+        Ok(total_rows)
+    }
+
+    /// Copy all entity versions visible after `final_block` in batches,
+    /// where each batch is a separate transaction. This assumes that all
+    /// other write activity to the source table is blocked while we copy
+    fn copy_nonfinal_entities(
+        &self,
+        conn: &PgConnection,
+        reporter: &mut dyn PruneReporter,
+        final_block: BlockNumber,
+    ) -> Result<usize, StoreError> {
+        let column_list = self.column_list();
+
+        // Determine the last vid that we need to copy
+        let VidRange { min_vid, max_vid } = sql_query(&format!(
+            "select coalesce(min(vid), 0) as min_vid, \
+                    coalesce(max(vid), -1) as max_vid from {src} \
+              where coalesce(upper(block_range), 2147483647) > $1 \
+                and block_range && int4range($1, null)",
+            src = self.src.qualified_name,
+        ))
+        .bind::<Integer, _>(final_block)
+        .get_result::<VidRange>(conn)?;
+
+        let mut batch_size = AdaptiveBatchSize::new(&self.src);
+        // The first vid we still need to copy
+        let mut next_vid = min_vid;
+        let mut total_rows = 0;
+        while next_vid <= max_vid {
+            let start = Instant::now();
+            let rows = conn.transaction(|| {
+                // Page through all the rows in `src` in batches of
+                // `batch_size` that are visible to queries at block heights
+                // starting right after `final_block`.
+                // The conditions on `block_range` are expressed redundantly
+                // to make more indexes usable
+                sql_query(&format!(
+                    "insert into {dst}({column_list}) \
+                     select {column_list} from {src} \
+                      where coalesce(upper(block_range), 2147483647) > $1 \
+                        and block_range && int4range($1, null) \
+                        and vid >= $2 and vid < $2 + $3 \
+                      order by vid",
+                    dst = self.dst.qualified_name,
+                    src = self.src.qualified_name,
+                ))
+                .bind::<Integer, _>(final_block)
+                .bind::<BigInt, _>(next_vid)
+                .bind::<BigInt, _>(&batch_size)
+                .execute(conn)
+                .map_err(StoreError::from)
+            })?;
+
+            total_rows += rows;
+            next_vid += batch_size.size;
+
+            batch_size.adapt(start.elapsed());
+
+            reporter.copy_nonfinal_batch(
+                self.src.name.as_str(),
+                rows as usize,
+                total_rows,
+                next_vid > max_vid,
+            );
+        }
+        Ok(total_rows)
+    }
+
+    /// Replace the `src` table with the `dst` table
+    fn switch(self, logger: &Logger, conn: &PgConnection) -> Result<(), StoreError> {
+        let src_qname = &self.src.qualified_name;
+        let dst_qname = &self.dst.qualified_name;
+        let src_nsp = &self.src_nsp;
+        let dst_nsp = &self.dst_nsp;
+
+        let vid_seq = format!("{}_{VID_COLUMN}_seq", self.src.name);
+
+        let mut query = String::new();
+
+        // What we are about to do would get blocked by autovacuum on our
+        // tables, so just kill the autovacuum
+        if let Err(e) = catalog::cancel_vacuum(conn, src_nsp) {
+            warn!(logger, "Failed to cancel vacuum during pruning; trying to carry on regardless";
+                  "src" => src_nsp.as_str(), "error" => e.to_string());
+        }
+
+        // Make sure the vid sequence
+        // continues from where it was
+        writeln!(
+            query,
+            "select setval('{dst_nsp}.{vid_seq}', nextval('{src_nsp}.{vid_seq}'));"
+        )?;
+
+        writeln!(query, "drop table {src_qname};")?;
+        writeln!(query, "alter table {dst_qname} set schema {src_nsp}")?;
+        conn.transaction(|| conn.batch_execute(&query))?;
+
+        Ok(())
+    }
+
+    fn column_list(&self) -> String {
+        self.src
+            .column_names()
+            .map(|name| format!("\"{name}\""))
+            .join(", ")
+    }
+}
+
+impl Layout {
+    /// Remove all data from the underlying deployment that is not needed to
+    /// respond to queries before block `earliest_block`. The strategy
+    /// implemented here works well for situations in which pruning will
+    /// remove a large amount of data from the subgraph (at least 50%)
+    ///
+    /// Blocks before `final_block` are considered final and it is assumed
+    /// that they will not be modified in any way while pruning is running.
+    /// Only tables where the ratio of entities to entity versions is below
+    /// `prune_ratio` will actually be pruned.
+    ///
+    /// The strategy for `prune_by_copying` is to copy all data that is
+    /// needed to respond to queries at block heights at or after
+    /// `earliest_block` to a new table and then to replace the existing
+    /// tables with these new tables atomically in a transaction. Copying
+    /// happens in two stages: we first copy data for final blocks without
+    /// blocking writes, and then copy data for nonfinal blocks. The latter
+    /// blocks writes by taking a lock on the row for the deployment in
+    /// `subgraph_deployment` (via `deployment::lock`). The process for
+    /// switching to the new tables needs to take the naming of various
+    /// database objects that Postgres creates automatically into account so
+    /// that they all have the same names as the original objects to ensure
+    /// that pruning can be done again without risking name clashes.
+    ///
+    /// The reason this strategy works well when a lot (or even the
+    /// majority) of the data needs to be removed is that in the more
+    /// straightforward strategy of simply deleting unneeded data, accessing
+    /// the remaining data becomes very inefficient since it is scattered
+    /// over a large number of pages, often with just one row per page. We
+    /// would therefore need to do a full vacuum of the tables after
+    /// deleting which effectively copies the remaining data into new
+    /// tables. But a full vacuum takes an `access exclusive` lock which
+    /// prevents both reads and writes to the table, which means it would
+    /// also block queries to the deployment, often for extended periods of
+    /// time. The `prune_by_copying` strategy never blocks reads, it only
+    /// ever blocks writes.
+    pub fn prune_by_copying(
+        &self,
+        logger: &Logger,
+        reporter: &mut dyn PruneReporter,
+        conn: &PgConnection,
+        earliest_block: BlockNumber,
+        final_block: BlockNumber,
+        prune_ratio: f64,
+        cancel: &CancelHandle,
+    ) -> Result<(), CancelableError<StoreError>> {
+        // Analyze all tables and get statistics for them
+        let mut tables: Vec<_> = self.tables.values().collect();
+        reporter.start_analyze();
+        tables.sort_by_key(|table| table.name.as_str());
+        for table in tables {
+            reporter.start_analyze_table(table.name.as_str());
+            table.analyze(conn)?;
+            reporter.finish_analyze_table(table.name.as_str());
+            cancel.check_cancel()?;
+        }
+        let stats = catalog::stats(conn, &self.site.namespace)?;
+        reporter.finish_analyze(stats.as_slice());
+
+        // Determine which tables are prunable and create a shadow table for
+        // them via `TablePair::create`
+        let dst_nsp = Namespace::prune(self.site.id);
+        let prunable_tables = conn.transaction(|| -> Result<_, StoreError> {
+            catalog::recreate_schema(conn, dst_nsp.as_str())?;
+
+            let mut prunable_tables: Vec<TablePair> = self
+                .tables
+                .values()
+                .filter_map(|table| {
+                    stats
+                        .iter()
+                        .find(|s| s.tablename == table.name.as_str())
+                        .map(|s| (table, s))
+                })
+                .filter(|(_, stats)| stats.ratio <= prune_ratio)
+                .map(|(table, _)| {
+                    TablePair::create(
+                        conn,
+                        table.cheap_clone(),
+                        self.site.namespace.clone(),
+                        dst_nsp.clone(),
+                    )
+                })
+                .collect::<Result<_, _>>()?;
+            prunable_tables.sort_by(|a, b| a.src.name.as_str().cmp(b.src.name.as_str()));
+            Ok(prunable_tables)
+        })?;
+        cancel.check_cancel()?;
+
+        // Copy final entities. This can happen in parallel to indexing as
+        // that part of the table will not change
+        reporter.copy_final_start(earliest_block, final_block);
+        for table in &prunable_tables {
+            table.copy_final_entities(conn, reporter, earliest_block, final_block, cancel)?;
+        }
+        reporter.copy_final_finish();
+
+        let prunable_src: Vec<_> = prunable_tables
+            .iter()
+            .map(|table| table.src.clone())
+            .collect();
+
+        // Copy nonfinal entities, and replace the original `src` table with
+        // the smaller `dst` table
+        reporter.start_switch();
+        // see also: deployment-lock-for-update
+        deployment::with_lock(conn, &self.site, || -> Result<_, StoreError> {
+            for table in &prunable_tables {
+                reporter.copy_nonfinal_start(table.src.name.as_str());
+                table.copy_nonfinal_entities(conn, reporter, final_block)?;
+                cancel.check_cancel().map_err(CancelableError::from)?;
+            }
+
+            for table in prunable_tables {
+                conn.transaction(|| table.switch(logger, conn))?;
+                cancel.check_cancel().map_err(CancelableError::from)?;
+            }
+
+            Ok(())
+        })?;
+        reporter.finish_switch();
+
+        // Get rid of the temporary prune schema
+        catalog::drop_schema(conn, dst_nsp.as_str())?;
+
+        // Analyze the new tables
+        reporter.start_analyze();
+        for table in &prunable_src {
+            reporter.start_analyze_table(table.name.as_str());
+            table.analyze(conn)?;
+            reporter.finish_analyze_table(table.name.as_str());
+            cancel.check_cancel()?;
+        }
+        let stats: Vec<_> = catalog::stats(conn, &self.site.namespace)?
+            .into_iter()
+            .filter(|s| {
+                prunable_src
+                    .iter()
+                    .any(|table| *table.name.as_str() == s.tablename)
+            })
+            .collect();
+        reporter.finish_analyze(stats.as_slice());
+
+        reporter.finish_prune();
+
+        Ok(())
+    }
+}
+
+#[derive(QueryableByName)]
+struct VidRange {
+    #[sql_type = "BigInt"]
+    min_vid: i64,
+    #[sql_type = "BigInt"]
+    max_vid: i64,
+}
diff --git a/store/postgres/src/relational/query_tests.rs b/store/postgres/src/relational/query_tests.rs
index 7e89a73b512..acb4610b301 100644
--- a/store/postgres/src/relational/query_tests.rs
+++ b/store/postgres/src/relational/query_tests.rs
@@ -1,4 +1,4 @@
-use std::sync::Arc;
+use std::{collections::BTreeSet, sync::Arc};
 
 use diesel::{debug_query, pg::Pg};
 use graph::{
@@ -33,7 +33,8 @@ fn test_layout(gql: &str) -> Layout {
     let schema = Schema::parse(gql, subgraph.clone()).expect("Test schema invalid");
     let namespace = Namespace::new("sgd0815".to_owned()).unwrap();
     let site = Arc::new(make_dummy_site(subgraph, namespace, "anet".to_string()));
-    let catalog = Catalog::for_tests(site.clone()).expect("Can not create catalog");
+    let catalog =
+        Catalog::for_tests(site.clone(), BTreeSet::new()).expect("Can not create catalog");
     Layout::new(site, &schema, catalog).expect("Failed to construct Layout")
 }
 
@@ -54,7 +55,7 @@ fn filter_contains(filter: EntityFilter, sql: &str) {
     assert!(
         query.to_string().contains(sql),
         "Expected query /{}/ to contain /{}/",
-        query.to_string(),
+        query,
         sql
     );
 }
diff --git a/store/postgres/src/relational_queries.rs b/store/postgres/src/relational_queries.rs
index e62de38f14c..339dce1db11 100644
--- a/store/postgres/src/relational_queries.rs
+++ b/store/postgres/src/relational_queries.rs
@@ -14,10 +14,11 @@ use diesel::Connection;
 
 use graph::components::store::EntityKey;
 use graph::data::value::Word;
+use graph::data_source::CausalityRegion;
 use graph::prelude::{
     anyhow, r, serde_json, Attribute, BlockNumber, ChildMultiplicity, Entity, EntityCollection,
-    EntityFilter, EntityLink, EntityOrder, EntityRange, EntityWindow, ParentLink,
-    QueryExecutionError, StoreError, Value, ENV_VARS,
ENV_VARS, + EntityFilter, EntityLink, EntityOrder, EntityOrderByChild, EntityOrderByChildInfo, EntityRange, + EntityWindow, ParentLink, QueryExecutionError, StoreError, Value, ENV_VARS, }; use graph::{ components::store::{AttributeNames, EntityType}, @@ -39,14 +40,17 @@ use crate::sql_value::SqlValue; use crate::{ block_range::{ BlockRangeColumn, BlockRangeLowerBoundClause, BlockRangeUpperBoundClause, BLOCK_COLUMN, - BLOCK_RANGE_COLUMN, BLOCK_RANGE_CURRENT, + BLOCK_RANGE_COLUMN, BLOCK_RANGE_CURRENT, CAUSALITY_REGION_COLUMN, }, - primary::Namespace, + primary::{Namespace, Site}, }; /// Those are columns that we always want to fetch from the database. const BASE_SQL_COLUMNS: [&'static str; 2] = ["id", "vid"]; +/// The maximum number of bind variables that can be used in a query +const POSTGRES_MAX_PARAMETERS: usize = u16::MAX as usize; // 65535 + #[derive(Debug)] pub(crate) struct UnsupportedFilter { pub filter: String, @@ -445,6 +449,8 @@ pub struct EntityDeletion { entity: String, #[sql_type = "Text"] id: String, + #[sql_type = "Integer"] + causality_region: CausalityRegion, } impl EntityDeletion { @@ -455,6 +461,10 @@ impl EntityDeletion { pub fn id(&self) -> &str { &self.id } + + pub fn causality_region(&self) -> CausalityRegion { + self.causality_region + } } /// Helper struct for retrieving entities from the database. With diesel, we @@ -500,22 +510,12 @@ impl EntityData { if key == "g$parent_id" { match &parent_type { None => { - if ENV_VARS.store.disable_error_for_toplevel_parents { - // Only temporarily in case reporting an - // error causes unexpected trouble. Can - // be removed once it's been working for - // a few days - let value = - T::Value::from_column_value(&ColumnType::String, json)?; - out.insert_entity_data("g$parent_id".to_owned(), value); - } else { - // A query that does not have parents - // somehow returned parent ids. We have no - // idea how to deserialize that - return Err(graph::constraint_violation!( - "query unexpectedly produces parent ids" - )); - } + // A query that does not have parents + // somehow returned parent ids. We have no + // idea how to deserialize that + return Err(graph::constraint_violation!( + "query unexpectedly produces parent ids" + )); } Some(parent_type) => { let value = T::Value::from_column_value(parent_type, json)?; @@ -609,10 +609,8 @@ impl<'a> QueryFragment for QueryValue<'a> { out.push_sql(") || "); } out.push_sql("to_tsvector("); - out.push_bind_param::( - &config.language.as_str().to_string(), - )?; - out.push_sql("::regconfig, "); + out.push_sql(config.language.as_sql()); + out.push_sql(", "); out.push_bind_param::(&value)?; } out.push_sql("))"); @@ -1452,10 +1450,12 @@ impl<'a> QueryFragment for QueryFilter<'a> { } } +/// A query that finds an entity by key. Used during indexing. +/// See also `FindManyQuery`. #[derive(Debug, Clone, Constructor)] pub struct FindQuery<'a> { table: &'a Table, - id: &'a str, + key: &'a EntityKey, block: BlockNumber, } @@ -1463,6 +1463,12 @@ impl<'a> QueryFragment for FindQuery<'a> { fn walk_ast(&self, mut out: AstPass) -> QueryResult<()> { out.unsafe_to_cache_prepared(); + let EntityKey { + entity_type: _, + entity_id, + causality_region, + } = self.key; + // Generate // select '..' 
as entity, to_jsonb(e.*) as data
        //   from schema.table e where id = $1
@@ -1472,8 +1478,13 @@
         out.push_sql(" from ");
         out.push_sql(self.table.qualified_name.as_str());
         out.push_sql(" e\n where ");
-        self.table.primary_key().eq(self.id, &mut out)?;
+        self.table.primary_key().eq(entity_id, &mut out)?;
         out.push_sql(" and ");
+        if self.table.has_causality_region {
+            out.push_sql("causality_region = ");
+            out.push_bind_param::<Integer, _>(causality_region)?;
+            out.push_sql(" and ");
+        }
         BlockRangeColumn::new(self.table, "e.", self.block).contains(&mut out)
     }
 }
@@ -1562,7 +1573,13 @@ impl<'a> QueryFragment<Pg> for FindPossibleDeletionsQuery<'a> {
         }
         out.push_sql("select ");
         out.push_bind_param::<Text, _>(&table.object.as_str())?;
-        out.push_sql(" as entity, e.id\n");
+        out.push_sql(" as entity, ");
+        if table.has_causality_region {
+            out.push_sql("causality_region, ");
+        } else {
+            out.push_sql("0 as causality_region, ");
+        }
+        out.push_sql("e.id\n");
         out.push_sql(" from ");
         out.push_sql(table.qualified_name.as_str());
         out.push_sql(" e\n where ");
@@ -1590,10 +1607,10 @@ impl<'a, Conn> RunQueryDsl<Conn> for FindPossibleDeletionsQuery<'a> {}
 
 #[derive(Debug, Clone, Constructor)]
 pub struct FindManyQuery<'a> {
     pub(crate) _namespace: &'a Namespace,
-    pub(crate) tables: Vec<&'a Table>,
+    pub(crate) tables: Vec<(&'a Table, CausalityRegion)>,
 
     // Maps object name to ids.
-    pub(crate) ids_for_type: &'a BTreeMap<&'a EntityType, Vec<&'a str>>,
+    pub(crate) ids_for_type: &'a BTreeMap<(EntityType, CausalityRegion), Vec<String>>,
 
     pub(crate) block: BlockNumber,
 }
@@ -1609,7 +1626,7 @@ impl<'a> QueryFragment<Pg> for FindManyQuery<'a> {
         // Generate a query
         //    select $object0 as entity, to_jsonb(e.*) as data
         //      from schema.<table1> e where {id.is_in($ids1)}
         //    union all
         //    ...
-        for (i, table) in self.tables.iter().enumerate() {
+        for (i, (table, cr)) in self.tables.iter().enumerate() {
             if i > 0 {
                 out.push_sql("\nunion all\n");
             }
@@ -1621,8 +1638,13 @@
         out.push_sql(" e\n where ");
         table
             .primary_key()
-            .is_in(&self.ids_for_type[&table.object], &mut out)?;
+            .is_in(&self.ids_for_type[&(table.object.clone(), *cr)], &mut out)?;
         out.push_sql(" and ");
+        if table.has_causality_region {
+            out.push_sql("causality_region = ");
+            out.push_bind_param::<Integer, _>(cr)?;
+            out.push_sql(" and ");
+        }
         BlockRangeColumn::new(table, "e.", self.block).contains(&mut out)?;
         }
         Ok(())
     }
@@ -1707,6 +1729,28 @@ impl<'a> InsertQuery<'a> {
         }
         hashmap.into_iter().map(|(_key, value)| value).collect()
     }
+
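Postgres caps a single statement at 65535 bind variables (the full range of a `u16`), which is the budget that the `POSTGRES_MAX_PARAMETERS` constant introduced earlier in this diff and the `chunk_size` helper below divide up. A small, self-contained illustration of the arithmetic, with invented column counts:

```rust
// Worked example of the bind-variable budget enforced by `chunk_size`
// below; the per-row parameter count is made up for illustration.
const POSTGRES_MAX_PARAMETERS: usize = u16::MAX as usize; // 65535

fn rows_per_insert(bind_params_per_row: usize) -> usize {
    POSTGRES_MAX_PARAMETERS / bind_params_per_row
}

fn main() {
    // Suppose each row uses five bind variables (e.g. four ordinary
    // columns plus one more; a fulltext column would add one variable
    // per indexed field). A single INSERT then carries at most
    // 65535 / 5 = 13107 rows, and larger batches must be chunked.
    assert_eq!(rows_per_insert(5), 13107);
}
```

+    /// Return the maximum number of entities that can be inserted with one
+    /// invocation of `InsertQuery`.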
The number makes it so that we do not + /// exceed the maximum number of bind variables that can be used in a + /// query, and depends on what columns `table` has and how they get put + /// into the query + pub fn chunk_size(table: &Table) -> usize { + let mut count = 1; + for column in table.columns.iter() { + // This code depends closely on how `walk_ast` and `QueryValue` + // put values into bind variables + if let Some(fields) = &column.fulltext_fields { + // Fulltext fields use one bind variable for each field that + // gets put into the index + count += fields.len() + } else { + // All other values use one bind variable + count += 1 + } + } + POSTGRES_MAX_PARAMETERS / count + } } impl<'a> QueryFragment for InsertQuery<'a> { @@ -1732,12 +1776,16 @@ impl<'a> QueryFragment for InsertQuery<'a> { out.push_sql(", "); } self.br_column.name(&mut out); + if self.table.has_causality_region { + out.push_sql(", "); + out.push_sql(CAUSALITY_REGION_COLUMN); + }; out.push_sql(") values\n"); // Use a `Peekable` iterator to help us decide how to finalize each line. - let mut iter = self.entities.iter().map(|(_key, entity)| entity).peekable(); - while let Some(entity) = iter.next() { + let mut iter = self.entities.iter().peekable(); + while let Some((key, entity)) = iter.next() { out.push_sql("("); for column in &self.unique_columns { // If the column name is not within this entity's fields, we will issue the @@ -1750,6 +1798,10 @@ impl<'a> QueryFragment for InsertQuery<'a> { out.push_sql(", "); } self.br_column.literal_range_current(&mut out)?; + if self.table.has_causality_region { + out.push_sql(", "); + out.push_bind_param::(&key.causality_region)?; + }; out.push_sql(")"); // finalize line according to remaining entities to insert @@ -1906,6 +1958,7 @@ impl ParentIds { #[derive(Debug, Clone)] enum TableLink<'a> { Direct(&'a Column, ChildMultiplicity), + /// The `Table` is the parent table Parent(&'a Table, ParentIds), } @@ -2066,7 +2119,7 @@ impl<'a> FilterWindow<'a> { out.push_sql("\n/* children_type_a */ from unnest("); column.bind_ids(&self.ids, out)?; out.push_sql(") as p(id) cross join lateral (select "); - write_column_names(&self.column_names, self.table, out)?; + write_column_names(&self.column_names, self.table, None, out)?; out.push_sql(" from "); out.push_sql(self.table.qualified_name.as_str()); out.push_sql(" c where "); @@ -2145,7 +2198,7 @@ impl<'a> FilterWindow<'a> { out.push_sql("\n/* children_type_b */ from unnest("); column.bind_ids(&self.ids, out)?; out.push_sql(") as p(id) cross join lateral (select "); - write_column_names(&self.column_names, self.table, out)?; + write_column_names(&self.column_names, self.table, None, out)?; out.push_sql(" from "); out.push_sql(self.table.qualified_name.as_str()); out.push_sql(" c where "); @@ -2190,6 +2243,7 @@ impl<'a> FilterWindow<'a> { fn children_type_c( &self, + parent_primary_key: &Column, child_ids: &[Vec>], limit: ParentLimit<'_>, block: BlockNumber, @@ -2209,12 +2263,12 @@ impl<'a> FilterWindow<'a> { out.push_sql("\n/* children_type_c */ from "); out.push_sql("rows from (unnest("); - out.push_bind_param::, _>(&self.ids)?; + parent_primary_key.bind_ids(&self.ids, out)?; out.push_sql("), reduce_dim("); self.table.primary_key().push_matrix(child_ids, out)?; out.push_sql(")) as p(id, child_ids)"); out.push_sql(" cross join lateral (select "); - write_column_names(&self.column_names, self.table, out)?; + write_column_names(&self.column_names, self.table, None, out)?; out.push_sql(" from "); 
out.push_sql(self.table.qualified_name.as_str());
         out.push_sql(" c where ");
@@ -2292,9 +2346,13 @@ impl<'a> FilterWindow<'a> {
                 }
             }
-            TableLink::Parent(_, ParentIds::List(child_ids)) => {
-                self.children_type_c(child_ids, limit, block, &mut out)
-            }
+            TableLink::Parent(parent_table, ParentIds::List(child_ids)) => self.children_type_c(
+                parent_table.primary_key(),
+                child_ids,
+                limit,
+                block,
+                &mut out,
+            ),
             TableLink::Parent(_, ParentIds::Scalar(child_ids)) => {
                 self.child_type_d(child_ids, limit, block, &mut out)
             }
@@ -2521,9 +2579,69 @@ impl<'a> FilterCollection<'a> {
     }
 }
 
+#[derive(Debug, Clone)]
+pub struct ChildKeyDetails<'a> {
+    /// Table representing the parent entity
+    pub parent_table: &'a Table,
+    /// Column in the parent table that stores the connection between the parent and the child
+    pub parent_join_column: &'a Column,
+    /// Table representing the child entity
+    pub child_table: &'a Table,
+    /// Column in the child table that stores the connection between the child and the parent
+    pub child_join_column: &'a Column,
+    /// Column of the child table that sorting is done on
+    pub sort_by_column: &'a Column,
+    /// Prefix for the child table
+    pub prefix: String,
+    /// Either `asc` or `desc`
+    pub direction: &'static str,
+}
+
+#[derive(Debug, Clone)]
+pub struct ChildKeyAndIdSharedDetails<'a> {
+    /// Table representing the parent entity
+    pub parent_table: &'a Table,
+    /// Column in the parent table that stores the connection between the parent and the child
+    pub parent_join_column: &'a Column,
+    /// Table representing the child entity
+    pub child_table: &'a Table,
+    /// Column in the child table that stores the connection between the child and the parent
+    pub child_join_column: &'a Column,
+    /// Column of the child table that sorting is done on
+    pub sort_by_column: &'a Column,
+    /// Prefix for the child table
+    pub prefix: String,
+    /// Either `asc` or `desc`
+    pub direction: &'static str,
+}
+
+#[derive(Debug, Clone)]
+pub struct ChildIdDetails<'a> {
+    /// Table representing the parent entity
+    pub parent_table: &'a Table,
+    /// Column in the parent table that stores the connection between the parent and the child
+    pub parent_join_column: &'a Column,
+    /// Table representing the child entity
+    pub child_table: &'a Table,
+    /// Column in the child table that stores the connection between the child and the parent
+    pub child_join_column: &'a Column,
+    /// Prefix for the child table
+    pub prefix: String,
+}
+
+#[derive(Debug, Clone)]
+pub enum ChildKey<'a> {
+    Single(ChildKeyDetails<'a>),
+    Many(Vec<ChildKeyDetails<'a>>),
+    IdAsc(ChildIdDetails<'a>, Option<BlockRangeColumn<'a>>),
+    IdDesc(ChildIdDetails<'a>, Option<BlockRangeColumn<'a>>),
+    ManyIdAsc(Vec<ChildIdDetails<'a>>, Option<BlockRangeColumn<'a>>),
+    ManyIdDesc(Vec<ChildIdDetails<'a>>, Option<BlockRangeColumn<'a>>),
+}
+
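When ordering by a child attribute across several child tables (the `Many*` variants), the generated ORDER BY collapses the per-table aliases (`cc0`, `cc1`, ...) with `coalesce`, as `multi_sort_expr` does further below. A hypothetical, self-contained sketch of just that string shape, not the diesel-based original:

```rust
// Illustrative only: mirrors the coalesce() ordering that
// `multi_sort_expr` emits for ChildKey::Many over several child tables.
fn many_order_by(prefixes: &[&str], sort_col: &str, direction: &str) -> String {
    let cols: Vec<String> = prefixes
        .iter()
        .map(|p| format!("{}.{}", p, sort_col))
        .collect();
    format!("coalesce({}) {}", cols.join(", "), direction)
}

fn main() {
    // e.g. ordering parents by a child attribute across two child tables
    assert_eq!(
        many_order_by(&["cc0", "cc1"], "name", "asc"),
        "coalesce(cc0.name, cc1.name) asc"
    );
}
```

 /// Convenience to pass the name of the column to order by around.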
If `name` /// is `None`, the sort key should be ignored -#[derive(Debug, Copy, Clone)] +#[derive(Debug, Clone)] pub enum SortKey<'a> { None, /// Order by `id asc` @@ -2536,20 +2654,22 @@ pub enum SortKey<'a> { value: Option<&'a str>, direction: &'static str, }, + /// Order by some other column; `column` will never be `id` + ChildKey(ChildKey<'a>), } /// String representation that is useful for debugging when `walk_ast` fails impl<'a> fmt::Display for SortKey<'a> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - use SortKey::*; - match self { - None => write!(f, "none"), - IdAsc(Option::None) => write!(f, "{}", PRIMARY_KEY_COLUMN), - IdAsc(Some(br)) => write!(f, "{}, {}", PRIMARY_KEY_COLUMN, br.column_name()), - IdDesc(Option::None) => write!(f, "{} desc", PRIMARY_KEY_COLUMN), - IdDesc(Some(br)) => write!(f, "{} desc, {} desc", PRIMARY_KEY_COLUMN, br.column_name()), - Key { + SortKey::None => write!(f, "none"), + SortKey::IdAsc(Option::None) => write!(f, "{}", PRIMARY_KEY_COLUMN), + SortKey::IdAsc(Some(br)) => write!(f, "{}, {}", PRIMARY_KEY_COLUMN, br.column_name()), + SortKey::IdDesc(Option::None) => write!(f, "{} desc", PRIMARY_KEY_COLUMN), + SortKey::IdDesc(Some(br)) => { + write!(f, "{} desc, {} desc", PRIMARY_KEY_COLUMN, br.column_name()) + } + SortKey::Key { column, value: _, direction, @@ -2561,20 +2681,117 @@ impl<'a> fmt::Display for SortKey<'a> { PRIMARY_KEY_COLUMN, direction ), + SortKey::ChildKey(child) => match child { + ChildKey::Single(details) => write!( + f, + "{}.{} {}, {}.{} {}", + details.child_table.name.as_str(), + details.sort_by_column.name.as_str(), + details.direction, + details.child_table.name.as_str(), + PRIMARY_KEY_COLUMN, + details.direction + ), + ChildKey::Many(details) => details.iter().try_for_each(|details| { + write!( + f, + "{}.{} {}, {}.{} {}", + details.child_table.name.as_str(), + details.sort_by_column.name.as_str(), + details.direction, + details.child_table.name.as_str(), + PRIMARY_KEY_COLUMN, + details.direction + ) + }), + + ChildKey::ManyIdAsc(details, Option::None) => { + details.iter().try_for_each(|details| { + write!( + f, + "{}.{}", + details.child_table.name.as_str(), + PRIMARY_KEY_COLUMN + ) + }) + } + ChildKey::ManyIdAsc(details, Some(br)) => details.iter().try_for_each(|details| { + write!( + f, + "{}.{}, {}.{}", + details.child_table.name.as_str(), + PRIMARY_KEY_COLUMN, + details.child_table.name.as_str(), + br.column_name() + ) + }), + ChildKey::ManyIdDesc(details, Option::None) => { + details.iter().try_for_each(|details| { + write!( + f, + "{}.{} desc", + details.child_table.name.as_str(), + PRIMARY_KEY_COLUMN + ) + }) + } + ChildKey::ManyIdDesc(details, Some(br)) => details.iter().try_for_each(|details| { + write!( + f, + "{}.{} desc, {}.{} desc", + details.child_table.name.as_str(), + PRIMARY_KEY_COLUMN, + details.child_table.name.as_str(), + br.column_name() + ) + }), + + ChildKey::IdAsc(details, Option::None) => write!( + f, + "{}.{}", + details.child_table.name.as_str(), + PRIMARY_KEY_COLUMN + ), + ChildKey::IdAsc(details, Some(br)) => write!( + f, + "{}.{}, {}.{}", + details.child_table.name.as_str(), + PRIMARY_KEY_COLUMN, + details.child_table.name.as_str(), + br.column_name() + ), + ChildKey::IdDesc(details, Option::None) => write!( + f, + "{}.{} desc", + details.child_table.name.as_str(), + PRIMARY_KEY_COLUMN + ), + ChildKey::IdDesc(details, Some(br)) => { + write!( + f, + "{}.{} desc, {}.{} desc", + details.child_table.name.as_str(), + PRIMARY_KEY_COLUMN, + details.child_table.name.as_str(), + br.column_name() + 
) + } + }, } } } +const ASC: &str = "asc"; +const DESC: &str = "desc"; + impl<'a> SortKey<'a> { fn new( order: EntityOrder, collection: &'a FilterCollection, filter: Option<&'a EntityFilter>, block: BlockNumber, + layout: &'a Layout, ) -> Result { - const ASC: &str = "asc"; - const DESC: &str = "desc"; - fn with_key<'a>( table: &'a Table, attribute: String, @@ -2611,6 +2828,223 @@ impl<'a> SortKey<'a> { } } + fn with_child_object_key<'a>( + parent_table: &'a Table, + child_table: &'a Table, + join_attribute: String, + derived: bool, + attribute: String, + br_column: Option>, + direction: &'static str, + ) -> Result, QueryExecutionError> { + let sort_by_column = child_table.column_for_field(&attribute)?; + if sort_by_column.is_fulltext() { + Err(QueryExecutionError::NotSupported( + "Sorting by fulltext fields".to_string(), + )) + } else { + let (parent_column, child_column) = match derived { + true => ( + parent_table.primary_key(), + child_table.column_for_field(&join_attribute).map_err(|_| { + graph::constraint_violation!( + "Column for a join attribute `{}` of `{}` table not found", + join_attribute, + child_table.name.as_str() + ) + })?, + ), + false => ( + parent_table + .column_for_field(&join_attribute) + .map_err(|_| { + graph::constraint_violation!( + "Column for a join attribute `{}` of `{}` table not found", + join_attribute, + parent_table.name.as_str() + ) + })?, + child_table.primary_key(), + ), + }; + + if sort_by_column.is_primary_key() { + return match direction { + ASC => Ok(SortKey::ChildKey(ChildKey::IdAsc( + ChildIdDetails { + parent_table, + child_table, + parent_join_column: parent_column, + child_join_column: child_column, + prefix: "cc".to_string(), + }, + br_column, + ))), + DESC => Ok(SortKey::ChildKey(ChildKey::IdDesc( + ChildIdDetails { + parent_table, + child_table, + parent_join_column: parent_column, + child_join_column: child_column, + prefix: "cc".to_string(), + }, + br_column, + ))), + _ => unreachable!("direction is 'asc' or 'desc'"), + }; + } + + Ok(SortKey::ChildKey(ChildKey::Single(ChildKeyDetails { + parent_table, + child_table, + parent_join_column: parent_column, + child_join_column: child_column, + /// Sort by this column + sort_by_column, + prefix: "cc".to_string(), + direction, + }))) + } + } + + fn build_children_vec<'a>( + layout: &'a Layout, + parent_table: &'a Table, + entity_types: Vec, + child: EntityOrderByChildInfo, + direction: &'static str, + ) -> Result>, QueryExecutionError> { + return entity_types + .iter() + .enumerate() + .map(|(i, entity_type)| { + let child_table = layout.table_for_entity(entity_type)?; + let sort_by_column = child_table.column_for_field(&child.sort_by_attribute)?; + if sort_by_column.is_fulltext() { + Err(QueryExecutionError::NotSupported( + "Sorting by fulltext fields".to_string(), + )) + } else { + let (parent_column, child_column) = match child.derived { + true => ( + parent_table.primary_key(), + child_table + .column_for_field(&child.join_attribute) + .map_err(|_| { + graph::constraint_violation!( + "Column for a join attribute `{}` of `{}` table not found", + child.join_attribute, + child_table.name.as_str() + ) + })?, + ), + false => ( + parent_table + .column_for_field(&child.join_attribute) + .map_err(|_| { + graph::constraint_violation!( + "Column for a join attribute `{}` of `{}` table not found", + child.join_attribute, + parent_table.name.as_str() + ) + })?, + child_table.primary_key(), + ), + }; + + Ok(ChildKeyAndIdSharedDetails { + parent_table, + child_table, + parent_join_column: parent_column, 
+ child_join_column: child_column, + prefix: format!("cc{}", i), + sort_by_column, + direction, + }) + } + }) + .collect::>, QueryExecutionError>>(); + } + + fn with_child_interface_key<'a>( + layout: &'a Layout, + parent_table: &'a Table, + child: EntityOrderByChildInfo, + entity_types: Vec, + br_column: Option>, + direction: &'static str, + ) -> Result, QueryExecutionError> { + if let Some(first_entity) = entity_types.first() { + let child_table = layout.table_for_entity(first_entity)?; + let sort_by_column = child_table.column_for_field(&child.sort_by_attribute)?; + + if sort_by_column.is_fulltext() { + Err(QueryExecutionError::NotSupported( + "Sorting by fulltext fields".to_string(), + )) + } else if sort_by_column.is_primary_key() { + if direction == ASC { + Ok(SortKey::ChildKey(ChildKey::ManyIdAsc( + build_children_vec( + layout, + parent_table, + entity_types, + child, + direction, + )? + .iter() + .map(|asd| ChildIdDetails { + parent_table: asd.parent_table, + child_table: asd.child_table, + parent_join_column: asd.parent_join_column, + child_join_column: asd.child_join_column, + prefix: asd.prefix.clone(), + }) + .collect(), + br_column, + ))) + } else { + Ok(SortKey::ChildKey(ChildKey::ManyIdDesc( + build_children_vec( + layout, + parent_table, + entity_types, + child, + direction, + )? + .iter() + .map(|asd| ChildIdDetails { + parent_table: asd.parent_table, + child_table: asd.child_table, + parent_join_column: asd.parent_join_column, + child_join_column: asd.child_join_column, + prefix: asd.prefix.clone(), + }) + .collect(), + br_column, + ))) + } + } else { + Ok(SortKey::ChildKey(ChildKey::Many( + build_children_vec(layout, parent_table, entity_types, child, direction)? + .iter() + .map(|asd| ChildKeyDetails { + parent_table: asd.parent_table, + parent_join_column: asd.parent_join_column, + child_table: asd.child_table, + child_join_column: asd.child_join_column, + sort_by_column: asd.sort_by_column, + prefix: asd.prefix.clone(), + direction: asd.direction.clone(), + }) + .collect(), + ))) + } + } else { + Ok(SortKey::ChildKey(ChildKey::Many(vec![]))) + } + } + // If there is more than one table, we are querying an interface, // and the order is on an attribute in that interface so that all // tables have a column for that. 
It is therefore enough to just @@ -2630,6 +3064,34 @@ impl<'a> SortKey<'a> { EntityOrder::Descending(attr, _) => with_key(table, attr, filter, DESC, br_column), EntityOrder::Default => Ok(SortKey::IdAsc(br_column)), EntityOrder::Unordered => Ok(SortKey::None), + EntityOrder::ChildAscending(kind) => match kind { + EntityOrderByChild::Object(child, entity_type) => with_child_object_key( + table, + layout.table_for_entity(&entity_type)?, + child.join_attribute, + child.derived, + child.sort_by_attribute, + br_column, + ASC, + ), + EntityOrderByChild::Interface(child, entity_types) => { + with_child_interface_key(layout, table, child, entity_types, br_column, ASC) + } + }, + EntityOrder::ChildDescending(kind) => match kind { + EntityOrderByChild::Object(child, entity_type) => with_child_object_key( + table, + layout.table_for_entity(&entity_type)?, + child.join_attribute, + child.derived, + child.sort_by_attribute, + br_column, + DESC, + ), + EntityOrderByChild::Interface(child, entity_types) => { + with_child_interface_key(layout, table, child, entity_types, br_column, DESC) + } + }, } } @@ -2656,6 +3118,51 @@ impl<'a> SortKey<'a> { out.push_identifier(column.name.as_str())?; Ok(()) } + SortKey::ChildKey(nested) => { + match nested { + ChildKey::Single(child) => { + if child.sort_by_column.is_primary_key() { + return Err(constraint_violation!("SortKey::Key never uses 'id'")); + } + out.push_sql(", "); + out.push_sql(child.prefix.as_str()); + out.push_sql("."); + out.push_identifier(child.sort_by_column.name.as_str())?; + } + ChildKey::Many(children) => { + for child in children.iter() { + if child.sort_by_column.is_primary_key() { + return Err(constraint_violation!("SortKey::Key never uses 'id'")); + } + out.push_sql(", "); + out.push_sql(child.prefix.as_str()); + out.push_sql("."); + out.push_identifier(child.sort_by_column.name.as_str())?; + } + } + ChildKey::ManyIdAsc(children, br_column) + | ChildKey::ManyIdDesc(children, br_column) => { + for child in children.iter() { + if let Some(br_column) = br_column { + out.push_sql(", "); + out.push_sql(child.prefix.as_str()); + out.push_sql("."); + br_column.name(out); + } + } + } + ChildKey::IdAsc(child, br_column) | ChildKey::IdDesc(child, br_column) => { + if let Some(br_column) = br_column { + out.push_sql(", "); + out.push_sql(child.prefix.as_str()); + out.push_sql("."); + br_column.name(out); + } + } + } + + Ok(()) + } } } @@ -2690,7 +3197,70 @@ impl<'a> SortKey<'a> { direction, } => { out.push_sql("order by "); - SortKey::sort_expr(column, value, direction, out) + SortKey::sort_expr(column, value, direction, None, None, out) + } + SortKey::ChildKey(child) => { + out.push_sql("order by "); + match child { + ChildKey::Single(child) => SortKey::sort_expr( + child.sort_by_column, + &None, + child.direction, + Some(&child.prefix), + Some("c"), + out, + ), + ChildKey::Many(children) => { + let columns: Vec<(&Column, &str)> = children + .iter() + .map(|child| (child.sort_by_column, child.prefix.as_str())) + .collect(); + SortKey::multi_sort_expr( + columns, + children.first().unwrap().direction, + Some("c"), + out, + ) + } + + ChildKey::ManyIdAsc(children, br_column) => { + let prefixes: Vec<&str> = + children.iter().map(|child| child.prefix.as_str()).collect(); + SortKey::multi_sort_id_expr(prefixes, ASC, br_column, out) + } + ChildKey::ManyIdDesc(children, br_column) => { + let prefixes: Vec<&str> = + children.iter().map(|child| child.prefix.as_str()).collect(); + SortKey::multi_sort_id_expr(prefixes, DESC, br_column, out) + } + + 
ChildKey::IdAsc(child, br_column) => { + out.push_sql(child.prefix.as_str()); + out.push_sql("."); + out.push_identifier(PRIMARY_KEY_COLUMN)?; + if let Some(br_column) = br_column { + out.push_sql(", "); + out.push_sql(child.prefix.as_str()); + out.push_sql("."); + br_column.bare_name(out); + } + Ok(()) + } + ChildKey::IdDesc(child, br_column) => { + out.push_sql(child.prefix.as_str()); + out.push_sql("."); + out.push_identifier(PRIMARY_KEY_COLUMN)?; + out.push_sql(" desc"); + if let Some(br_column) = br_column { + out.push_sql(", "); + out.push_sql(child.prefix.as_str()); + out.push_sql("."); + br_column.bare_name(out); + out.push_sql(" desc"); + } + Ok(()) + } + } } } } @@ -2716,7 +3286,12 @@ impl<'a> SortKey<'a> { direction, } => { out.push_sql("order by g$parent_id, "); - SortKey::sort_expr(column, value, direction, out) + SortKey::sort_expr(column, value, direction, None, None, out) + } + SortKey::ChildKey(_) => { + return Err(diesel::result::Error::QueryBuilderError( + "SortKey::ChildKey cannot be used for parent ordering (yet)".into(), + )); } } } @@ -2727,6 +3302,8 @@ impl<'a> SortKey<'a> { column: &Column, value: &Option<&str>, direction: &str, + column_prefix: Option<&str>, + rest_prefix: Option<&str>, out: &mut AstPass, ) -> QueryResult<()> { if column.is_primary_key() { @@ -2736,6 +3313,13 @@ impl<'a> SortKey<'a> { )); } + fn push_prefix(prefix: Option<&str>, out: &mut AstPass) { + if let Some(prefix) = prefix { + out.push_sql(prefix); + out.push_sql("."); + } + } + match &column.column_type { ColumnType::TSVector(config) => { let algorithm = match config.algorithm { @@ -2744,6 +3328,7 @@ impl<'a> SortKey<'a> { }; out.push_sql(algorithm); let name = column.name.as_str(); + push_prefix(column_prefix, out); out.push_identifier(name)?; out.push_sql(", to_tsquery("); @@ -2752,6 +3337,7 @@ impl<'a> SortKey<'a> { } _ => { let name = column.name.as_str(); + push_prefix(column_prefix, out); out.push_identifier(name)?; } } @@ -2761,17 +3347,232 @@ impl<'a> SortKey<'a> { out.push_sql(direction); out.push_sql(" nulls last"); out.push_sql(", "); + push_prefix(rest_prefix, out); out.push_identifier(PRIMARY_KEY_COLUMN)?; } else { out.push_sql(" "); out.push_sql(direction); out.push_sql(", "); + push_prefix(rest_prefix, out); out.push_identifier(PRIMARY_KEY_COLUMN)?; out.push_sql(" "); out.push_sql(direction); } Ok(()) } + + /// Generate + /// [COALESCE(name1, name2) direction,] id1, id2 + fn multi_sort_expr( + columns: Vec<(&Column, &str)>, + direction: &str, + rest_prefix: Option<&str>, + out: &mut AstPass, + ) -> QueryResult<()> { + for (column, _) in columns.iter() { + if column.is_primary_key() { + // This shouldn't happen since we'd use SortKey::ManyIdAsc/ManyDesc + return Err(constraint_violation!( + "multi_sort_expr called with primary key column" + )); + } + + match column.column_type { + ColumnType::TSVector(_) => { + return Err(constraint_violation!("TSVector is not supported")); + } + _ => {} + } + } + + fn push_prefix(prefix: Option<&str>, out: &mut AstPass) { + if let Some(prefix) = prefix { + out.push_sql(prefix); + out.push_sql("."); + } + } + + out.push_sql("coalesce("); + + for (i, (column, prefix)) in columns.iter().enumerate() { + if i != 0 { + out.push_sql(", "); + } + + let name = column.name.as_str(); + push_prefix(Some(prefix), out); + out.push_identifier(name)?; + } + + out.push_sql(") "); + + if ENV_VARS.store.reversible_order_by_off { + // Old behavior + out.push_sql(direction); + out.push_sql(" nulls last"); + out.push_sql(", "); + push_prefix(rest_prefix, out); + 
out.push_identifier(PRIMARY_KEY_COLUMN)?; + } else { + out.push_sql(direction); + out.push_sql(", "); + push_prefix(rest_prefix, out); + out.push_identifier(PRIMARY_KEY_COLUMN)?; + out.push_sql(" "); + out.push_sql(direction); + } + Ok(()) + } + + /// Generate + /// COALESCE(id1, id2) direction, [COALESCE(br_column1, br_column2) direction] + fn multi_sort_id_expr( + prefixes: Vec<&str>, + direction: &str, + br_column: &Option, + out: &mut AstPass, + ) -> QueryResult<()> { + fn push_prefix(prefix: Option<&str>, out: &mut AstPass) { + if let Some(prefix) = prefix { + out.push_sql(prefix); + out.push_sql("."); + } + } + + out.push_sql("coalesce("); + for (i, prefix) in prefixes.iter().enumerate() { + if i != 0 { + out.push_sql(", "); + } + + push_prefix(Some(prefix), out); + out.push_identifier(PRIMARY_KEY_COLUMN)?; + } + out.push_sql(") "); + + out.push_sql(direction); + + if let Some(br_column) = br_column { + out.push_sql(", coalesce("); + for (i, prefix) in prefixes.iter().enumerate() { + if i != 0 { + out.push_sql(", "); + } + + push_prefix(Some(prefix), out); + br_column.bare_name(out); + } + out.push_sql(") "); + out.push_sql(direction); + } + + Ok(()) + } + + fn add_child(&self, block: BlockNumber, out: &mut AstPass) -> QueryResult<()> { + fn add( + block: BlockNumber, + child_table: &Table, + child_column: &Column, + parent_column: &Column, + prefix: &str, + out: &mut AstPass, + ) -> QueryResult<()> { + out.push_sql(" left join "); + out.push_sql(child_table.qualified_name.as_str()); + out.push_sql(" as "); + out.push_sql(prefix); + out.push_sql(" on ("); + + if child_column.is_list() { + // Type C: p.id = any(c.child_ids) + out.push_sql("c."); + out.push_identifier(parent_column.name.as_str())?; + out.push_sql(" = any("); + out.push_sql(prefix); + out.push_sql("."); + out.push_identifier(child_column.name.as_str())?; + out.push_sql(")"); + } else if parent_column.is_list() { + // Type A: c.id = any(p.{parent_field}) + out.push_sql(prefix); + out.push_sql("."); + out.push_identifier(child_column.name.as_str())?; + out.push_sql(" = any(c."); + out.push_identifier(parent_column.name.as_str())?; + out.push_sql(")"); + } else { + // Type B: c.id = p.{parent_field} + out.push_sql(prefix); + out.push_sql("."); + out.push_identifier(child_column.name.as_str())?; + out.push_sql(" = "); + out.push_sql("c."); + out.push_identifier(parent_column.name.as_str())?; + } + + out.push_sql(" and "); + out.push_sql(prefix); + out.push_sql("."); + out.push_identifier(BLOCK_RANGE_COLUMN)?; + out.push_sql(" @> "); + out.push_bind_param::(&block)?; + out.push_sql(") "); + + Ok(()) + } + + match self { + SortKey::ChildKey(nested) => match nested { + ChildKey::Single(child) => { + add( + block, + &child.child_table, + &child.child_join_column, + &child.parent_join_column, + &child.prefix, + out, + )?; + } + ChildKey::Many(children) => { + for child in children.iter() { + add( + block, + &child.child_table, + &child.child_join_column, + &child.parent_join_column, + &child.prefix, + out, + )?; + } + } + ChildKey::ManyIdAsc(children, _) | ChildKey::ManyIdDesc(children, _) => { + for child in children.iter() { + add( + block, + &child.child_table, + &child.child_join_column, + &child.parent_join_column, + &child.prefix, + out, + )?; + } + } + ChildKey::IdAsc(child, _) | ChildKey::IdDesc(child, _) => { + add( + block, + &child.child_table, + &child.child_join_column, + &child.parent_join_column, + &child.prefix, + out, + )?; + } + }, + _ => {} + } + Ok(()) + } } /// Generate `[limit {first}] [offset {skip}] @@ 
-2820,6 +3621,7 @@ pub struct FilterQuery<'a> { range: FilterRange, block: BlockNumber, query_id: Option, + site: &'a Site, } /// String representation that is useful for debugging when `walk_ast` fails @@ -2840,13 +3642,15 @@ impl<'a> fmt::Display for FilterQuery<'a> { impl<'a> FilterQuery<'a> { pub fn new( collection: &'a FilterCollection, + layout: &'a Layout, filter: Option<&'a EntityFilter>, order: EntityOrder, range: EntityRange, block: BlockNumber, query_id: Option, + site: &'a Site, ) -> Result { - let sort_key = SortKey::new(order, collection, filter, block)?; + let sort_key = SortKey::new(order, collection, filter, block, layout)?; Ok(FilterQuery { collection, @@ -2854,6 +3658,7 @@ impl<'a> FilterQuery<'a> { range: FilterRange(range), block, query_id, + site, }) } @@ -2873,6 +3678,8 @@ impl<'a> FilterQuery<'a> { out.push_sql(table.qualified_name.as_str()); out.push_sql(" c"); + self.sort_key.add_child(self.block, &mut out)?; + out.push_sql("\n where "); BlockRangeColumn::new(&table, "c.", self.block).contains(&mut out)?; if let Some(filter) = table_filter { @@ -2910,7 +3717,7 @@ impl<'a> FilterQuery<'a> { ) -> QueryResult<()> { Self::select_entity_and_data(table, &mut out); out.push_sql(" from (select "); - write_column_names(column_names, table, &mut out)?; + write_column_names(column_names, table, Some("c."), &mut out)?; self.filtered_rows(table, filter, out.reborrow())?; out.push_sql("\n "); self.sort_key.order_by(&mut out)?; @@ -3114,10 +3921,17 @@ impl<'a> QueryFragment for FilterQuery<'a> { return Ok(()); } + // Tag the query with various information to make connecting it to + // the GraphQL query it came from easier. The names of the tags are + // chosen so that GCP's Query Insights will recognize them if let Some(qid) = &self.query_id { - out.push_sql("/* qid: "); + out.push_sql("/* controller='filter',application='"); + out.push_sql(self.site.namespace.as_str()); + out.push_sql("',route='"); out.push_sql(qid); - out.push_sql(" */\n"); + out.push_sql("',action='"); + out.push_sql(&self.block.to_string()); + out.push_sql("' */\n"); } // We generate four different kinds of queries, depending on whether // we need to window and whether we query just one or multiple entity @@ -3459,6 +4273,12 @@ impl<'a> QueryFragment for CopyEntityBatchQuery<'a> { } else { out.push_sql(BLOCK_RANGE_COLUMN); } + + if self.dst.has_causality_region { + out.push_sql(", "); + out.push_sql(CAUSALITY_REGION_COLUMN); + }; + out.push_sql(")\nselect "); for column in &self.columns { out.push_identifier(column.name.as_str())?; @@ -3505,6 +4325,24 @@ impl<'a> QueryFragment for CopyEntityBatchQuery<'a> { } (false, false) => out.push_sql(BLOCK_RANGE_COLUMN), } + + match (self.src.has_causality_region, self.dst.has_causality_region) { + (false, false) => (), + (true, true) => { + out.push_sql(", "); + out.push_sql(CAUSALITY_REGION_COLUMN); + } + (false, true) => { + out.push_sql(", 0"); + } + (true, false) => { + return Err(constraint_violation!( + "can not copy entity type {} to {} because the src has a causality region but the dst does not", + self.src.object.as_str(), + self.dst.object.as_str() + )); + } + } out.push_sql(" from "); out.push_sql(self.src.qualified_name.as_str()); out.push_sql(" where vid >= "); @@ -3534,13 +4372,21 @@ pub struct CopyVid { fn write_column_names( column_names: &AttributeNames, table: &Table, + prefix: Option<&str>, out: &mut AstPass, ) -> QueryResult<()> { + let prefix = prefix.unwrap_or(""); + match column_names { - AttributeNames::All => out.push_sql(" * "), + 
AttributeNames::All => { + out.push_sql(" "); + out.push_sql(prefix); + out.push_sql("*"); + } AttributeNames::Select(column_names) => { let mut iterator = iter_column_names(column_names, table, true).peekable(); while let Some(column_name) = iterator.next() { + out.push_sql(prefix); out.push_identifier(column_name)?; if iterator.peek().is_some() { out.push_sql(", "); diff --git a/store/postgres/src/sql_value.rs b/store/postgres/src/sql_value.rs index bf20419bd90..22439449f2b 100644 --- a/store/postgres/src/sql_value.rs +++ b/store/postgres/src/sql_value.rs @@ -7,7 +7,7 @@ use std::str::FromStr; use graph::data::store::{scalar, Value}; -#[derive(Clone, Debug, PartialEq, AsExpression)] +#[derive(Clone, Debug, PartialEq, Eq, AsExpression)] pub struct SqlValue(Value); impl SqlValue { diff --git a/store/postgres/src/subgraph_store.rs b/store/postgres/src/subgraph_store.rs index 23ee0358d8f..cd5fbbd847d 100644 --- a/store/postgres/src/subgraph_store.rs +++ b/store/postgres/src/subgraph_store.rs @@ -5,8 +5,8 @@ use diesel::{ types::{FromSql, ToSql}, }; use std::{ - collections::HashMap, - sync::{Arc, Mutex}, + collections::{BTreeMap, HashMap}, + sync::{atomic::AtomicU8, Arc, Mutex}, }; use std::{fmt, io::Write}; use std::{iter::FromIterator, time::Duration}; @@ -17,7 +17,7 @@ use graph::{ server::index_node::VersionInfo, store::{ self, BlockStore, DeploymentLocator, DeploymentSchemaVersion, - EnsLookup as EnsLookupTrait, SubgraphFork, + EnsLookup as EnsLookupTrait, PruneReporter, SubgraphFork, }, }, constraint_violation, @@ -27,20 +27,20 @@ use graph::{ prelude::{ anyhow, futures03::future::join_all, lazy_static, o, web3::types::Address, ApiSchema, ApiVersion, BlockHash, BlockNumber, BlockPtr, ChainStore, DeploymentHash, EntityOperation, - Logger, MetricsRegistry, NodeId, PartialBlockPtr, Schema, StoreError, SubgraphName, - SubgraphStore as SubgraphStoreTrait, SubgraphVersionSwitchingMode, + Logger, MetricsRegistry, NodeId, PartialBlockPtr, Schema, StoreError, + SubgraphDeploymentEntity, SubgraphName, SubgraphStore as SubgraphStoreTrait, + SubgraphVersionSwitchingMode, }, url::Url, util::timed_cache::TimedCache, }; -use crate::fork; use crate::{ connection_pool::ConnectionPool, deployment::SubgraphHealth, primary, primary::{DeploymentId, Mirror as PrimaryMirror, Site}, - relational::Layout, + relational::{index::Method, Layout}, writable::WritableStore, NotificationSender, }; @@ -49,6 +49,7 @@ use crate::{ detail::DeploymentDetail, primary::UnusedDeployment, }; +use crate::{fork, relational::index::CreateIndex, relational::SqlName}; /// The name of a database shard; valid names must match `[a-z0-9_]+` #[derive(Clone, Debug, Eq, PartialEq, Hash, AsExpression, FromSqlRow)] @@ -228,7 +229,7 @@ impl SubgraphStore { } } - pub(crate) async fn get_proof_of_indexing( + pub async fn get_proof_of_indexing( &self, id: &DeploymentHash, indexer: &Option
, @@ -372,6 +373,20 @@ impl SubgraphStoreInner { Ok(site) } + fn evict(&self, id: &DeploymentHash) -> Result<(), StoreError> { + if let Some((site, _)) = self.sites.remove(id) { + let store = self.stores.get(&site.shard).ok_or_else(|| { + constraint_violation!( + "shard {} for deployment sgd{} not found when evicting", + site.shard, + site.id + ) + })?; + store.layout_cache.remove(&site); + } + Ok(()) + } + fn find_site(&self, id: DeploymentId) -> Result, StoreError> { if let Some(site) = self.sites.find(|site| site.id == id) { return Ok(site); @@ -498,6 +513,8 @@ impl SubgraphStoreInner { #[cfg(not(debug_assertions))] assert!(!replace); + self.evict(&schema.id)?; + let graft_base = deployment .graft_base .as_ref() @@ -606,7 +623,7 @@ impl SubgraphStoreInner { // Transmogrify the deployment into a new one let deployment = DeploymentCreate { manifest: deployment.manifest, - earliest_block: deployment.earliest_block.clone(), + start_block: deployment.start_block.clone(), graft_base: Some(src.deployment.clone()), graft_block: Some(block), debug_fork: deployment.debug_fork, @@ -814,6 +831,11 @@ impl SubgraphStoreInner { Ok(()) } + pub fn status_for_id(&self, id: graph::components::store::DeploymentId) -> status::Info { + let filter = status::Filter::DeploymentIds(vec![id]); + self.status(filter).unwrap().into_iter().next().unwrap() + } + pub(crate) fn status(&self, filter: status::Filter) -> Result, StoreError> { let sites = match filter { status::Filter::SubgraphName(name) => { @@ -1008,18 +1030,44 @@ impl SubgraphStoreInner { pub fn analyze( &self, deployment: &DeploymentLocator, - entity_name: &str, + entity_name: Option<&str>, ) -> Result<(), StoreError> { let (store, site) = self.store(&deployment.hash)?; store.analyze(site, entity_name) } + /// Return the statistics targets for all tables of `deployment`. The + /// first return value is the default target, and the second value maps + /// the name of each table to a map of column name to its statistics + /// target. A value of `-1` means that the global default will be used. + pub fn stats_targets( + &self, + deployment: &DeploymentLocator, + ) -> Result<(i32, BTreeMap>), StoreError> { + let (store, site) = self.store(&deployment.hash)?; + store.stats_targets(site) + } + + /// Set the statistics target for columns `columns` in `deployment`. If + /// `entity` is `Some`, only set it for the table for that entity, if it + /// is `None`, set it for all tables in the deployment. 
+    pub fn set_stats_target(
+        &self,
+        deployment: &DeploymentLocator,
+        entity: Option<&str>,
+        columns: Vec<String>,
+        target: i32,
+    ) -> Result<(), StoreError> {
+        let (store, site) = self.store(&deployment.hash)?;
+        store.set_stats_target(site, entity, columns, target)
+    }
+
     pub async fn create_manual_index(
         &self,
         deployment: &DeploymentLocator,
         entity_name: &str,
         field_names: Vec<String>,
-        index_method: String,
+        index_method: Method,
     ) -> Result<(), StoreError> {
         let (store, site) = self.store(&deployment.hash)?;
         store
@@ -1031,7 +1079,7 @@
         &self,
         deployment: &DeploymentLocator,
         entity_name: &str,
-    ) -> Result<Vec<String>, StoreError> {
+    ) -> Result<Vec<CreateIndex>, StoreError> {
         let (store, site) = self.store(&deployment.hash)?;
         store.indexes_for_entity(site, entity_name).await
     }
@@ -1054,10 +1102,80 @@
         let (store, site) = self.store(&deployment.hash)?;
         store.set_account_like(site, table, is_account_like).await
     }
+
+    /// Remove the history that is only needed to respond to queries before
+    /// block number `earliest_block` from the given deployment.
+    ///
+    /// Only tables with a ratio of entities to entity versions below
+    /// `prune_ratio` will be pruned; that ratio is determined by looking at
+    /// Postgres planner stats to avoid lengthy counting queries. It is
+    /// assumed that if the ratio is higher than `prune_ratio`, pruning
+    /// won't make much of a difference and will just cause unnecessary
+    /// work.
+    ///
+    /// The `reorg_threshold` is used to determine which blocks will no
+    /// longer be modified by a subgraph writer that may be running
+    /// concurrently; this reduces the amount of time that the writer needs
+    /// to be locked out while pruning is happening.
+    ///
+    /// Pruning can take a long time, and is structured into multiple
+    /// transactions such that none of them takes an excessively long time.
+    /// If pruning gets interrupted, it may leave some intermediate tables
+    /// behind in the database. Those will get cleaned up the next time an
+    /// attempt is made to prune the same deployment.
+    pub async fn prune(
+        &self,
+        reporter: Box<dyn PruneReporter>,
+        deployment: &DeploymentLocator,
+        earliest_block: BlockNumber,
+        reorg_threshold: BlockNumber,
+        prune_ratio: f64,
+    ) -> Result<Box<dyn PruneReporter>, StoreError> {
+        // Find the store by the deployment id; otherwise, we could only
+        // prune the active copy of the deployment with `deployment.hash`
+        let site = self.find_site(deployment.id.into())?;
+        let store = self.for_site(&site)?;
+
+        store
+            .prune(reporter, site, earliest_block, reorg_threshold, prune_ratio)
+            .await
+    }
+
+    pub fn load_deployment(&self, site: &Site) -> Result<SubgraphDeploymentEntity, StoreError> {
+        let src_store = self.for_site(site)?;
+        src_store.load_deployment(site)
+    }
 }
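As a usage illustration for the pruning entry point above, a hedged sketch of a caller; `ConsoleReporter` and the concrete block numbers are invented for the example and are not part of this codebase:

```rust
// Hypothetical caller; assumes some `ConsoleReporter: PruneReporter`
// implementation and an already-resolved `DeploymentLocator`.
async fn prune_old_history(
    store: &SubgraphStoreInner,
    deployment: &DeploymentLocator,
) -> Result<(), StoreError> {
    let reporter: Box<dyn PruneReporter> = Box::new(ConsoleReporter::default());
    // Keep enough history to answer queries at block 8_000_000 and later,
    // treat the newest 250 blocks as still subject to reorgs, and only
    // prune tables where entities make up less than half of all versions.
    let _reporter = store
        .prune(reporter, deployment, 8_000_000, 250, 0.5)
        .await?;
    Ok(())
}
```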
+const STATE_ENS_NOT_CHECKED: u8 = 0;
+const STATE_ENS_EMPTY: u8 = 1;
+const STATE_ENS_NOT_EMPTY: u8 = 2;
+
+/// EnsLookup reads from a rainbow table store in postgres that needs to be manually
+/// loaded. To avoid unnecessary database roundtrips, the empty table check is lazy
+/// and will not be retried. Once the table is checked, any subsequent calls will
+/// just use the stored result.
 struct EnsLookup {
     primary: ConnectionPool,
+    // In order to keep the struct lock free, we'll use u8 for the status:
+    // 0 - Not Checked
+    // 1 - Checked - empty
+    // 2 - Checked - non-empty
+    state: AtomicU8,
+}
+
+impl EnsLookup {
+    pub fn new(pool: ConnectionPool) -> Self {
+        Self {
+            primary: pool,
+            state: AtomicU8::new(STATE_ENS_NOT_CHECKED),
+        }
+    }
+
+    fn is_table_empty(pool: &ConnectionPool) -> Result<bool, StoreError> {
+        let conn = pool.get()?;
+        primary::Connection::new(conn).is_ens_table_empty()
+    }
 }
 
 impl EnsLookupTrait for EnsLookup {
@@ -1065,14 +1183,31 @@
         let conn = self.primary.get()?;
         primary::Connection::new(conn).find_ens_name(hash)
     }
+
+    fn is_table_empty(&self) -> Result<bool, StoreError> {
+        match self.state.load(std::sync::atomic::Ordering::SeqCst) {
+            STATE_ENS_NOT_CHECKED => {}
+            STATE_ENS_EMPTY => return Ok(true),
+            STATE_ENS_NOT_EMPTY => return Ok(false),
+            _ => unreachable!("unsupported state"),
+        }
+
+        let is_empty = Self::is_table_empty(&self.primary)?;
+        let new_state = match is_empty {
+            true => STATE_ENS_EMPTY,
+            false => STATE_ENS_NOT_EMPTY,
+        };
+        self.state
+            .store(new_state, std::sync::atomic::Ordering::SeqCst);
+
+        Ok(is_empty)
+    }
 }
 
 #[async_trait::async_trait]
 impl SubgraphStoreTrait for SubgraphStore {
     fn ens_lookup(&self) -> Arc<dyn EnsLookupTrait> {
-        Arc::new(EnsLookup {
-            primary: self.mirror.primary().clone(),
-        })
+        Arc::new(EnsLookup::new(self.mirror.primary().clone()))
     }
 
     // FIXME: This method should not get a node_id
@@ -1194,7 +1329,12 @@
         // idempotent and there is ever only one `WritableStore` for any
         // deployment
         if let Some(writable) = self.writables.lock().unwrap().get(&deployment) {
-            return Ok(writable.cheap_clone());
+            // A poisoned writable will not write anything anymore; we
+            // discard it and create a new one that is properly initialized
+            // according to the state in the database.
+            if !writable.poisoned() {
+                return Ok(writable.cheap_clone());
+            }
        }
 
         // Ideally the lower level functions would be asyncified.
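The lazy check above is a small lock-free state machine over an `AtomicU8`. A self-contained sketch of the same pattern, with the database probe stubbed out and all names invented for illustration:

```rust
use std::sync::atomic::{AtomicU8, Ordering};

const NOT_CHECKED: u8 = 0;
const EMPTY: u8 = 1;
const NOT_EMPTY: u8 = 2;

struct LazyEmptyCheck {
    state: AtomicU8,
}

impl LazyEmptyCheck {
    fn new() -> Self {
        Self { state: AtomicU8::new(NOT_CHECKED) }
    }

    // Stand-in for the real database probe; in EnsLookup this is the
    // `is_ens_table_empty` query against the primary pool.
    fn probe(&self) -> bool {
        true
    }

    fn is_empty(&self) -> bool {
        // Fast path: serve the cached answer without touching the database.
        match self.state.load(Ordering::SeqCst) {
            EMPTY => return true,
            NOT_EMPTY => return false,
            _ => {} // NOT_CHECKED: fall through to the probe
        }
        let empty = self.probe();
        let new_state = if empty { EMPTY } else { NOT_EMPTY };
        // Two racing first callers both probe and store the same answer,
        // so a plain store suffices; no compare_exchange is needed.
        self.state.store(new_state, Ordering::SeqCst);
        empty
    }
}

fn main() {
    let check = LazyEmptyCheck::new();
    assert!(check.is_empty()); // probes once
    assert!(check.is_empty()); // served from the cached state
}
```

The race between concurrent first callers is benign: in the worst case the probe runs twice, which matches the original code's tolerance for an extra roundtrip over the cost of a lock.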
@@ -1215,6 +1355,18 @@ impl SubgraphStoreTrait for SubgraphStore { Ok(writable) } + async fn stop_subgraph(&self, loc: &DeploymentLocator) -> Result<(), StoreError> { + self.evict(&loc.hash)?; + + // Remove the writable from the cache and stop it + let deployment = loc.id.into(); + let writable = self.writables.lock().unwrap().remove(&deployment); + match writable { + Some(writable) => writable.stop().await, + None => Ok(()), + } + } + fn is_deployed(&self, id: &DeploymentHash) -> Result { match self.site(id) { Ok(_) => Ok(true), @@ -1243,4 +1395,13 @@ impl SubgraphStoreTrait for SubgraphStore { .map(|site| site.into()) .collect()) } + + async fn set_manifest_raw_yaml( + &self, + hash: &DeploymentHash, + raw_yaml: String, + ) -> Result<(), StoreError> { + let (store, site) = self.store(hash)?; + store.set_manifest_raw_yaml(site, raw_yaml).await + } } diff --git a/store/postgres/src/writable.rs b/store/postgres/src/writable.rs index 800a0ced080..204af9f7fe3 100644 --- a/store/postgres/src/writable.rs +++ b/store/postgres/src/writable.rs @@ -1,3 +1,4 @@ +use std::collections::BTreeSet; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Mutex; use std::time::Duration; @@ -7,9 +8,10 @@ use graph::blockchain::block_stream::FirehoseCursor; use graph::components::store::EntityKey; use graph::components::store::ReadStore; use graph::data::subgraph::schema; -use graph::env::env_var; +use graph::data_source::CausalityRegion; use graph::prelude::{ - BlockNumber, Entity, MetricsRegistry, Schema, SubgraphStore as _, BLOCK_NUMBER_MAX, + BlockNumber, Entity, MetricsRegistry, Schema, SubgraphDeploymentEntity, SubgraphStore as _, + BLOCK_NUMBER_MAX, }; use graph::slog::info; use graph::util::bounded_queue::BoundedQueue; @@ -29,16 +31,6 @@ use store::StoredDynamicDataSource; use crate::deployment_store::DeploymentStore; use crate::{primary, primary::Site, relational::Layout, SubgraphStore}; -graph::prelude::lazy_static! { - /// The size of the write queue; this many blocks can be buffered for - /// writing before calls to transact block operations will block. - /// Setting this to `0` disables pipelined writes, and writes will be - /// done synchronously. - pub static ref WRITE_QUEUE_SIZE: usize = { - env_var("GRAPH_STORE_WRITE_QUEUE", 5) - }; -} - /// A wrapper around `SubgraphStore` that only exposes functions that are /// safe to call from `WritableStore`, i.e., functions that either do not /// deal with anything that depends on a specific deployment @@ -58,6 +50,10 @@ impl WritableSubgraphStore { fn layout(&self, id: &DeploymentHash) -> Result, StoreError> { self.0.layout(id) } + + fn load_deployment(&self, site: &Site) -> Result { + self.0.load_deployment(site) + } } /// Write synchronously to the actual store, i.e., once a method returns, @@ -171,7 +167,8 @@ impl SyncStore { let graft_base = match self.writable.graft_pending(&self.site.deployment)? 
{ Some((base_id, base_ptr)) => { let src = self.store.layout(&base_id)?; - Some((src, base_ptr)) + let deployment_entity = self.store.load_deployment(&src.site)?; + Some((src, base_ptr, deployment_entity)) } None => None, }; @@ -255,7 +252,7 @@ impl SyncStore { data_sources: &[StoredDynamicDataSource], deterministic_errors: &[SubgraphError], manifest_idx_and_name: &[(u32, String)], - offchain_to_remove: &[StoredDynamicDataSource], + processed_data_sources: &[StoredDynamicDataSource], ) -> Result<(), StoreError> { self.retry("transact_block_operations", move || { let event = self.writable.transact_block_operations( @@ -267,7 +264,7 @@ impl SyncStore { data_sources, deterministic_errors, manifest_idx_and_name, - offchain_to_remove, + processed_data_sources, )?; let _section = stopwatch.start_section("send_store_event"); @@ -278,12 +275,20 @@ impl SyncStore { fn get_many( &self, - ids_for_type: BTreeMap<&EntityType, Vec<&str>>, + keys: BTreeSet, block: BlockNumber, - ) -> Result>, StoreError> { + ) -> Result, StoreError> { + let mut by_type: BTreeMap<(EntityType, CausalityRegion), Vec> = BTreeMap::new(); + for key in keys { + by_type + .entry((key.entity_type, key.causality_region)) + .or_default() + .push(key.entity_id.into()); + } + self.retry("get_many", || { self.writable - .get_many(self.site.cheap_clone(), &ids_for_type, block) + .get_many(self.site.cheap_clone(), &by_type, block) }) } @@ -323,6 +328,17 @@ impl SyncStore { .await } + pub(crate) async fn causality_region_curr_val( + &self, + ) -> Result, StoreError> { + self.retry_async("causality_region_curr_val", || async { + self.writable + .causality_region_curr_val(self.site.cheap_clone()) + .await + }) + .await + } + fn deployment_synced(&self) -> Result<(), StoreError> { self.retry("deployment_synced", || { let event = { @@ -394,6 +410,7 @@ impl BlockTracker { self.revert = self.revert.min(block_ptr.number); self.block = self.block.min(block_ptr.number); } + Request::Stop => { /* do nothing */ } } } @@ -424,7 +441,7 @@ enum Request { data_sources: Vec, deterministic_errors: Vec, manifest_idx_and_name: Vec<(u32, String)>, - offchain_to_remove: Vec, + processed_data_sources: Vec, }, RevertTo { store: Arc, @@ -432,10 +449,16 @@ enum Request { block_ptr: BlockPtr, firehose_cursor: FirehoseCursor, }, + Stop, +} + +enum ExecResult { + Continue, + Stop, } impl Request { - fn execute(&self) -> Result<(), StoreError> { + fn execute(&self) -> Result { match self { Request::Write { store, @@ -446,22 +469,27 @@ impl Request { data_sources, deterministic_errors, manifest_idx_and_name, - offchain_to_remove, - } => store.transact_block_operations( - block_ptr_to, - firehose_cursor, - mods, - stopwatch, - data_sources, - deterministic_errors, - manifest_idx_and_name, - offchain_to_remove, - ), + processed_data_sources, + } => store + .transact_block_operations( + block_ptr_to, + firehose_cursor, + mods, + stopwatch, + data_sources, + deterministic_errors, + manifest_idx_and_name, + processed_data_sources, + ) + .map(|()| ExecResult::Continue), Request::RevertTo { store, block_ptr, firehose_cursor, - } => store.revert_block_operations(block_ptr.clone(), firehose_cursor), + } => store + .revert_block_operations(block_ptr.clone(), firehose_cursor) + .map(|()| ExecResult::Continue), + Request::Stop => return Ok(ExecResult::Stop), } } } @@ -550,12 +578,19 @@ impl Queue { }; let _section = queue.stopwatch.start_section("queue_pop"); + use ExecResult::*; match res { - Ok(Ok(())) => { + Ok(Ok(Continue)) => { // The request has been handled. 
It's now safe to remove it // from the queue queue.queue.pop().await; } + Ok(Ok(Stop)) => { + // Graceful shutdown. We also handled the request + // successfully + queue.queue.pop().await; + return; + } Ok(Err(e)) => { error!(logger, "Subgraph writer failed"; "error" => e.to_string()); queue.record_err(e); @@ -610,6 +645,10 @@ impl Queue { self.check_err() } + async fn stop(&self) -> Result<(), StoreError> { + self.push(Request::Stop).await + } + fn check_err(&self) -> Result<(), StoreError> { if let Some(err) = self.write_err.lock().unwrap().take() { return Err(err); @@ -664,7 +703,7 @@ impl Queue { None } } - Request::RevertTo { .. } => None, + Request::RevertTo { .. } | Request::Stop => None, } }); @@ -678,15 +717,15 @@ impl Queue { /// Get many entities at once by looking at both the queue and the store fn get_many( &self, - mut ids_for_type: BTreeMap<&EntityType, Vec<&str>>, - ) -> Result>, StoreError> { + mut keys: BTreeSet, + ) -> Result, StoreError> { // See the implementation of `get` for how we handle reverts let mut tracker = BlockTracker::new(); // Get entities from entries in the queue - let mut map = self.queue.fold( + let entities_in_queue = self.queue.fold( BTreeMap::new(), - |mut map: BTreeMap>, req| { + |mut map: BTreeMap>, req| { tracker.update(req.as_ref()); match req.as_ref() { Request::Write { @@ -695,50 +734,30 @@ impl Queue { if tracker.visible(block_ptr) { for emod in mods { let key = emod.entity_ref(); - if let Some(ids) = ids_for_type.get_mut(&key.entity_type) { - if let Some(idx) = - ids.iter().position(|id| *id == key.entity_id.as_str()) - { - // We are looking for the entity - // underlying this modification. Add - // it to the result map, but also - // remove it from `ids_for_type` so - // that we don't look for it any - // more - if let Some(entity) = emod.entity() { - map.entry(key.entity_type.clone()) - .or_default() - .push(entity.clone()); - } - ids.swap_remove(idx); - if ids.is_empty() { - ids_for_type.remove(&key.entity_type); - } - } + // The key must be removed to avoid overwriting it with a stale value. + if let Some(key) = keys.take(key) { + map.insert(key, emod.entity().cloned()); } } } } - Request::RevertTo { .. } => { /* nothing to do */ } + Request::RevertTo { .. } | Request::Stop => { /* nothing to do */ } } map }, ); - // Whatever remains in `ids_for_type` needs to be gotten from the - // store. Take extra care to not unnecessarily copy maps - if !ids_for_type.is_empty() { - let store_map = self.store.get_many(ids_for_type, tracker.query_block())?; - if !store_map.is_empty() { - if map.is_empty() { - map = store_map - } else { - for (entity_type, mut entities) in store_map { - map.entry(entity_type).or_default().append(&mut entities); - } - } + // Whatever remains in `keys` needs to be gotten from the store + let mut map = self.store.get_many(keys, tracker.query_block())?; + + // Extend the store results with the entities from the queue. + for (key, entity) in entities_in_queue { + if let Some(entity) = entity { + let overwrite = map.insert(key, entity).is_some(); + assert!(!overwrite); } } + Ok(map) } @@ -762,18 +781,18 @@ impl Queue { Request::Write { block_ptr, data_sources, - offchain_to_remove, + processed_data_sources, .. } => { if tracker.visible(block_ptr) { dds.extend(data_sources.clone()); dds = dds .into_iter() - .filter(|dds| !offchain_to_remove.contains(dds)) + .filter(|dds| !processed_data_sources.contains(dds)) .collect(); } } - Request::RevertTo { .. } => { /* nothing to do */ } + Request::RevertTo { .. 
} | Request::Stop => { /* nothing to do */ } } dds }); @@ -791,6 +810,10 @@ impl Queue { Ok(dds) } + + fn poisoned(&self) -> bool { + self.poisoned.load(Ordering::SeqCst) + } } /// A shim to allow bypassing any pipelined store handling if need be @@ -823,7 +846,7 @@ impl Writer { data_sources: Vec, deterministic_errors: Vec, manifest_idx_and_name: Vec<(u32, String)>, - offchain_to_remove: Vec, + processed_data_sources: Vec, ) -> Result<(), StoreError> { match self { Writer::Sync(store) => store.transact_block_operations( @@ -834,7 +857,7 @@ impl Writer { &data_sources, &deterministic_errors, &manifest_idx_and_name, - &offchain_to_remove, + &processed_data_sources, ), Writer::Async(queue) => { let req = Request::Write { @@ -846,7 +869,7 @@ impl Writer { data_sources, deterministic_errors, manifest_idx_and_name, - offchain_to_remove, + processed_data_sources, }; queue.push(req).await } @@ -887,11 +910,11 @@ impl Writer { fn get_many( &self, - ids_for_type: BTreeMap<&EntityType, Vec<&str>>, - ) -> Result>, StoreError> { + keys: BTreeSet, + ) -> Result, StoreError> { match self { - Writer::Sync(store) => store.get_many(ids_for_type, BLOCK_NUMBER_MAX), - Writer::Async(queue) => queue.get_many(ids_for_type), + Writer::Sync(store) => store.get_many(keys, BLOCK_NUMBER_MAX), + Writer::Async(queue) => queue.get_many(keys), } } @@ -908,6 +931,20 @@ impl Writer { Writer::Async(queue) => queue.load_dynamic_data_sources(manifest_idx_and_name).await, } } + + fn poisoned(&self) -> bool { + match self { + Writer::Sync(_) => false, + Writer::Async(queue) => queue.poisoned(), + } + } + + async fn stop(&self) -> Result<(), StoreError> { + match self { + Writer::Sync(_) => Ok(()), + Writer::Async(queue) => queue.stop().await, + } + } } pub struct WritableStore { @@ -941,6 +978,14 @@ impl WritableStore { writer, }) } + + pub(crate) fn poisoned(&self) -> bool { + self.writer.poisoned() + } + + pub(crate) async fn stop(&self) -> Result<(), StoreError> { + self.writer.stop().await + } } impl ReadStore for WritableStore { @@ -950,9 +995,9 @@ impl ReadStore for WritableStore { fn get_many( &self, - ids_for_type: BTreeMap<&EntityType, Vec<&str>>, - ) -> Result>, StoreError> { - self.writer.get_many(ids_for_type) + keys: BTreeSet, + ) -> Result, StoreError> { + self.writer.get_many(keys) } fn input_schema(&self) -> Arc { @@ -1039,7 +1084,7 @@ impl WritableStoreTrait for WritableStore { data_sources: Vec, deterministic_errors: Vec, manifest_idx_and_name: Vec<(u32, String)>, - offchain_to_remove: Vec, + processed_data_sources: Vec, ) -> Result<(), StoreError> { self.writer .write( @@ -1050,7 +1095,7 @@ impl WritableStoreTrait for WritableStore { data_sources, deterministic_errors, manifest_idx_and_name, - offchain_to_remove, + processed_data_sources, ) .await?; @@ -1081,6 +1126,12 @@ impl WritableStoreTrait for WritableStore { .await } + async fn causality_region_curr_val(&self) -> Result, StoreError> { + // It should be empty when we call this, but just in case. 
+ self.writer.flush().await?; + self.store.causality_region_curr_val().await + } + fn shard(&self) -> &str { self.store.shard() } diff --git a/store/postgres/tests/chain_head.rs b/store/postgres/tests/chain_head.rs index a358991c3b3..864a78cba25 100644 --- a/store/postgres/tests/chain_head.rs +++ b/store/postgres/tests/chain_head.rs @@ -32,14 +32,14 @@ where F: Fn(Arc, Arc) -> Result<(), Error> + Send + 'static, { run_test_sequentially(|store| async move { - for name in vec![NETWORK_NAME, FAKE_NETWORK_SHARED] { + for name in &[NETWORK_NAME, FAKE_NETWORK_SHARED] { block_store::set_chain(chain.clone(), name); let chain_store = store.block_store().chain_store(name).expect("chain store"); // Run test test(chain_store.cheap_clone(), store.cheap_clone()) - .expect(&format!("test finishes successfully on network {}", name)); + .unwrap_or_else(|_| panic!("test finishes successfully on network {}", name)); } }); } @@ -50,7 +50,7 @@ where R: Future + Send + 'static, { run_test_sequentially(|store| async move { - for name in vec![NETWORK_NAME, FAKE_NETWORK_SHARED] { + for name in &[NETWORK_NAME, FAKE_NETWORK_SHARED] { block_store::set_chain(chain.clone(), name); let chain_store = store.block_store().chain_store(name).expect("chain store"); @@ -123,7 +123,7 @@ fn genesis_plus_one_with_sibling() { #[test] fn short_chain_missing_parent() { let chain = vec![&*BLOCK_ONE_NO_PARENT]; - check_chain_head_update(chain, None, Some(&NO_PARENT)); + check_chain_head_update(chain, None, Some(NO_PARENT)); } #[test] @@ -178,7 +178,7 @@ fn test_get_block_number() { let query_store = subgraph_store .query_store( - QueryTarget::Deployment(subgraph.cheap_clone().into(), Default::default()), + QueryTarget::Deployment(subgraph.cheap_clone(), Default::default()), false, ) .await diff --git a/store/postgres/tests/graft.rs b/store/postgres/tests/graft.rs index b7d51b3dfda..660661778e0 100644 --- a/store/postgres/tests/graft.rs +++ b/store/postgres/tests/graft.rs @@ -1,11 +1,10 @@ use graph::blockchain::block_stream::FirehoseCursor; -use hex_literal::hex; use lazy_static::lazy_static; use std::{marker::PhantomData, str::FromStr}; use test_store::*; use graph::components::store::{ - DeploymentLocator, EntityKey, EntityOrder, EntityQuery, EntityType, + DeploymentLocator, EntityKey, EntityOrder, EntityQuery, EntityType, PruneReporter, }; use graph::data::store::scalar; use graph::data::subgraph::schema::*; @@ -13,7 +12,6 @@ use graph::data::subgraph::*; use graph::prelude::*; use graph::semver::Version; use graph_store_postgres::SubgraphStore as DieselSubgraphStore; -use web3::types::H256; const USER_GQL: &str = " enum Color { yellow, red, blue, green } @@ -74,30 +72,24 @@ type User @entity(immutable: true) { const USER: &str = "User"; -macro_rules! block_pointer { - ($hash:expr, $number:expr) => {{ - BlockPtr::from((H256::from(hex!($hash)), $number as u64)) - }}; -} - lazy_static! 
{ static ref TEST_SUBGRAPH_ID: DeploymentHash = DeploymentHash::new("testsubgraph").unwrap(); static ref TEST_SUBGRAPH_SCHEMA: Schema = Schema::parse(USER_GQL, TEST_SUBGRAPH_ID.clone()).expect("Failed to parse user schema"); static ref BLOCKS: Vec = vec![ - block_pointer!( - "bd34884280958002c51d3f7b5f853e6febeba33de0f40d15b0363006533c924f", - 0 - ), - block_pointer!( - "8511fa04b64657581e3f00e14543c1d522d5d7e771b54aa3060b662ade47da13", - 1 - ), - block_pointer!( - "b98fb783b49de5652097a989414c767824dff7e7fd765a63b493772511db81c1", - 2 - ), - ]; + "bd34884280958002c51d3f7b5f853e6febeba33de0f40d15b0363006533c924f", + "8511fa04b64657581e3f00e14543c1d522d5d7e771b54aa3060b662ade47da13", + "b98fb783b49de5652097a989414c767824dff7e7fd765a63b493772511db81c1", + "7347afe69254df06729e123610b00b8b11f15cfae3241f9366fb113aec07489c", + "f8ccbd3877eb98c958614f395dd351211afb9abba187bfc1fb4ac414b099c4a6", + "7b0ea919e258eb2b119eb32de56b85d12d50ac6a9f7c5909f843d6172c8ba196", + "6b834521bb753c132fdcf0e1034803ed9068e324112f8750ba93580b393a986b", + "7cce080f5a49c2997a6cc65fc1cee9910fd8fc3721b7010c0b5d0873e2ac785e" + ] + .iter() + .enumerate() + .map(|(idx, hash)| BlockPtr::try_from((*hash, idx as i64)).unwrap()) + .collect(); } /// Test harness for running database integration tests. @@ -115,6 +107,8 @@ where // Seed database with test data let deployment = insert_test_data(store.clone()).await; + flush(&deployment).await.unwrap(); + // Run test test(store.cheap_clone(), deployment.clone()) .await @@ -150,7 +144,10 @@ async fn insert_test_data(store: Arc) -> DeploymentLocator }; // Create SubgraphDeploymentEntity - let deployment = DeploymentCreate::new(&manifest, None); + let mut yaml = serde_yaml::Mapping::new(); + yaml.insert("dataSources".into(), Vec::::new().into()); + let yaml = serde_yaml::to_string(&yaml).unwrap(); + let deployment = DeploymentCreate::new(yaml, &manifest, None); let name = SubgraphName::new("test/graft").unwrap(); let node_id = NodeId::new("test").unwrap(); let deployment = store @@ -169,7 +166,7 @@ async fn insert_test_data(store: Arc) -> DeploymentLocator USER, "Johnton", "tonofjohn@email.com", - 67 as i32, + 67_i32, 184.4, false, None, @@ -183,7 +180,7 @@ async fn insert_test_data(store: Arc) -> DeploymentLocator USER, "Cindini", "dinici@email.com", - 43 as i32, + 43_i32, 159.1, true, Some("red"), @@ -193,7 +190,7 @@ async fn insert_test_data(store: Arc) -> DeploymentLocator USER, "Shaqueeena", "queensha@email.com", - 28 as i32, + 28_i32, 111.7, false, Some("blue"), @@ -212,7 +209,7 @@ async fn insert_test_data(store: Arc) -> DeploymentLocator USER, "Shaqueeena", "teeko@email.com", - 28 as i32, + 28_i32, 111.7, false, None, @@ -262,10 +259,7 @@ fn create_test_entity( ); EntityOperation::Set { - key: EntityKey { - entity_type: EntityType::new(entity_type.to_string()), - entity_id: id.into(), - }, + key: EntityKey::data(entity_type.to_string(), id), data: test_entity, } } @@ -328,10 +322,7 @@ async fn check_graft( // Make our own entries for block 2 shaq.set("email", "shaq@gmail.com"); let op = EntityOperation::Set { - key: EntityKey { - entity_type: EntityType::new(USER.to_owned()), - entity_id: "3".into(), - }, + key: EntityKey::data(USER.to_owned(), "3"), data: shaq, }; transact_and_wait(&store, &deployment, BLOCKS[2].clone(), vec![op]) @@ -449,3 +440,99 @@ fn copy() { check_graft(store, deployment).await }) } + +#[test] +fn prune() { + fn check_at_block( + store: &DieselSubgraphStore, + src: &DeploymentLocator, + block: BlockNumber, + exp: Vec<&str>, + ) { + let query = 
EntityQuery::new( + src.hash.clone(), + block, + EntityCollection::All(vec![( + EntityType::new("User".to_string()), + AttributeNames::All, + )]), + ); + + let act: Vec<_> = store + .find(query) + .unwrap() + .into_iter() + .map(|entity| entity.id().unwrap()) + .collect(); + assert_eq!(act, exp); + } + + async fn prune( + store: &DieselSubgraphStore, + src: &DeploymentLocator, + earliest_block: BlockNumber, + ) -> Result<(), StoreError> { + struct Progress; + impl PruneReporter for Progress {} + let reporter = Box::new(Progress); + + store + .prune(reporter, src, earliest_block, 1, 1.1) + .await + .map(|_| ()) + } + + run_test(|store, src| async move { + // The setup sets the subgraph pointer to block 2, we try to set + // earliest block to 5 + prune(&store, &src, 5) + .await + .expect_err("setting earliest block later than latest does not work"); + + // Latest block 2 minus reorg threshold 1 means we need to copy + // final blocks from block 1, but want earliest as block 2, i.e. no + // final blocks which won't work + prune(&store, &src, 2) + .await + .expect_err("setting earliest block after last final block fails"); + + // Add another version for user 2 at block 4 + let user2 = create_test_entity( + "2", + USER, + "Cindini", + "dinici@email.com", + 44_i32, + 157.1, + true, + Some("red"), + ); + transact_and_wait(&store, &src, BLOCKS[5].clone(), vec![user2]) + .await + .unwrap(); + + // Setup and the above addition create these user versions: + // id | versions + // ---+--------- + // 1 | [0,) + // 2 | [1,5) [5,) + // 3 | [1,2) [2,) + + // Forward block ptr to block 5 + transact_and_wait(&store, &src, BLOCKS[6].clone(), vec![]) + .await + .unwrap(); + // Pruning only removes the [1,2) version of user 3 + prune(&store, &src, 3).await.expect("pruning works"); + + // Check which versions exist at every block, even if they are + // before the new earliest block, since we don't have a convenient + // way to load all entity versions with their block range + check_at_block(&store, &src, 0, vec!["1"]); + check_at_block(&store, &src, 1, vec!["1", "2"]); + for block in 2..=5 { + check_at_block(&store, &src, block, vec!["1", "2", "3"]); + } + Ok(()) + }) +} diff --git a/store/postgres/tests/relational.rs b/store/postgres/tests/relational.rs index cca31b88ef3..986b4cde872 100644 --- a/store/postgres/tests/relational.rs +++ b/store/postgres/tests/relational.rs @@ -7,8 +7,7 @@ use graph::entity; use graph::prelude::BlockNumber; use graph::prelude::{ o, slog, tokio, web3::types::H256, DeploymentHash, Entity, EntityCollection, EntityFilter, - EntityOrder, EntityQuery, EntityRange, Logger, Schema, StopwatchMetrics, Value, ValueType, - BLOCK_NUMBER_MAX, + EntityOrder, EntityQuery, Logger, Schema, StopwatchMetrics, Value, ValueType, BLOCK_NUMBER_MAX, }; use graph_mock::MockMetricsRegistry; use graph_store_postgres::layout_for_tests::set_account_like; @@ -17,6 +16,7 @@ use graph_store_postgres::layout_for_tests::SqlName; use hex_literal::hex; use lazy_static::lazy_static; use std::borrow::Cow; +use std::collections::BTreeSet; use std::panic; use std::str::FromStr; use std::sync::Arc; @@ -427,7 +427,7 @@ fn create_schema(conn: &PgConnection) -> Layout { let query = format!("create schema {}", NAMESPACE.as_str()); conn.batch_execute(&*query).unwrap(); - Layout::create_relational_schema(&conn, Arc::new(site), &schema) + Layout::create_relational_schema(&conn, Arc::new(site), &schema, BTreeSet::new()) .expect("Failed to create relational schema") } @@ -495,19 +495,31 @@ fn find() { // Happy path: find existing 
entity let entity = layout - .find(conn, &*SCALAR, "one", BLOCK_NUMBER_MAX) + .find( + conn, + &EntityKey::data(SCALAR.as_str(), "one"), + BLOCK_NUMBER_MAX, + ) .expect("Failed to read Scalar[one]") .unwrap(); assert_entity_eq!(scrub(&*SCALAR_ENTITY), entity); // Find non-existing entity let entity = layout - .find(conn, &*SCALAR, "noone", BLOCK_NUMBER_MAX) + .find( + conn, + &EntityKey::data(SCALAR.as_str(), "noone"), + BLOCK_NUMBER_MAX, + ) .expect("Failed to read Scalar[noone]"); assert!(entity.is_none()); // Find for non-existing entity type - let err = layout.find(conn, &*NO_ENTITY, "one", BLOCK_NUMBER_MAX); + let err = layout.find( + conn, + &EntityKey::data(NO_ENTITY.as_str(), "one"), + BLOCK_NUMBER_MAX, + ); match err { Err(e) => assert_eq!("unknown table 'NoEntity'", e.to_string()), _ => { @@ -530,7 +542,11 @@ fn insert_null_fulltext_fields() { // Find entity with null string values let entity = layout - .find(conn, &*NULLABLE_STRINGS, "one", BLOCK_NUMBER_MAX) + .find( + conn, + &EntityKey::data(NULLABLE_STRINGS.as_str(), "one"), + BLOCK_NUMBER_MAX, + ) .expect("Failed to read NullableStrings[one]") .unwrap(); assert_entity_eq!(scrub(&*EMPTY_NULLABLESTRINGS_ENTITY), entity); @@ -556,7 +572,11 @@ fn update() { .expect("Failed to update"); let actual = layout - .find(conn, &*SCALAR, "one", BLOCK_NUMBER_MAX) + .find( + conn, + &EntityKey::data(SCALAR.as_str(), "one"), + BLOCK_NUMBER_MAX, + ) .expect("Failed to read Scalar[one]") .unwrap(); assert_entity_eq!(scrub(&entity), actual); @@ -612,9 +632,13 @@ fn update_many() { // check updates took effect let updated: Vec = ["one", "two", "three"] .iter() - .map(|id| { + .map(|&id| { layout - .find(conn, &*SCALAR, id, BLOCK_NUMBER_MAX) + .find( + conn, + &EntityKey::data(SCALAR.as_str(), id), + BLOCK_NUMBER_MAX, + ) .expect(&format!("Failed to read Scalar[{}]", id)) .unwrap() }) @@ -680,7 +704,11 @@ fn serialize_bigdecimal() { .expect("Failed to update"); let actual = layout - .find(conn, &*SCALAR, "one", BLOCK_NUMBER_MAX) + .find( + conn, + &EntityKey::data(SCALAR.as_str(), "one"), + BLOCK_NUMBER_MAX, + ) .expect("Failed to read Scalar[one]") .unwrap(); assert_entity_eq!(entity, actual); @@ -694,20 +722,11 @@ fn count_scalar_entities(conn: &PgConnection, layout: &Layout) -> usize { EntityFilter::Equal("bool".into(), false.into()), ]); let collection = EntityCollection::All(vec![(SCALAR.to_owned(), AttributeNames::All)]); + let mut query = EntityQuery::new(layout.site.deployment.clone(), BLOCK_NUMBER_MAX, collection) + .filter(filter); + query.range.first = None; layout - .query::( - &*LOGGER, - &conn, - collection, - Some(filter), - EntityOrder::Default, - EntityRange { - first: None, - skip: 0, - }, - BLOCK_NUMBER_MAX, - None, - ) + .query::(&*LOGGER, &conn, query) .map(|(entities, _)| entities) .expect("Count query failed") .len() @@ -890,7 +909,7 @@ fn revert_block() { let assert_fred = |name: &str| { let fred = layout - .find(conn, &EntityType::from("Cat"), id, BLOCK_NUMBER_MAX) + .find(conn, &EntityKey::data("Cat", id), BLOCK_NUMBER_MAX) .unwrap() .expect("there's a fred"); assert_eq!(name, fred.get("name").unwrap().as_str().unwrap()) @@ -925,20 +944,16 @@ fn revert_block() { }; let assert_marties = |max_block, except: Vec| { + let id = DeploymentHash::new("QmXW3qvxV7zXnwRntpj7yoK8HZVtaraZ67uMqaLRvXdxha").unwrap(); + let collection = + EntityCollection::All(vec![(EntityType::from("Mink"), AttributeNames::All)]); + let filter = EntityFilter::StartsWith("id".to_string(), Value::from("marty")); + let query = EntityQuery::new(id, 
BLOCK_NUMBER_MAX, collection) + .filter(filter) + .first(100) + .order(EntityOrder::Ascending("order".to_string(), ValueType::Int)); let marties: Vec = layout - .query( - &*LOGGER, - conn, - EntityCollection::All(vec![(EntityType::from("Mink"), AttributeNames::All)]), - Some(EntityFilter::StartsWith( - "id".to_string(), - Value::from("marty"), - )), - EntityOrder::Ascending("order".to_string(), ValueType::Int), - EntityRange::first(100), - BLOCK_NUMBER_MAX, - None, - ) + .query(&*LOGGER, conn, query) .map(|(entities, _)| entities) .expect("loading all marties works"); @@ -1006,21 +1021,13 @@ impl<'a> QueryChecker<'a> { Self { conn, layout } } - fn check(self, expected_entity_ids: Vec<&'static str>, query: EntityQuery) -> Self { + fn check(self, expected_entity_ids: Vec<&'static str>, mut query: EntityQuery) -> Self { let q = query.clone(); let unordered = matches!(query.order, EntityOrder::Unordered); + query.block = BLOCK_NUMBER_MAX; let entities = self .layout - .query::( - &*LOGGER, - self.conn, - query.collection, - query.filter, - query.order, - query.range, - BLOCK_NUMBER_MAX, - None, - ) + .query::(&*LOGGER, self.conn, query) .expect("layout.query failed to execute query") .0; @@ -1669,16 +1676,7 @@ impl<'a> FilterChecker<'a> { let entities = self .layout - .query::( - &*LOGGER, - &self.conn, - query.collection, - query.filter, - query.order, - query.range, - BLOCK_NUMBER_MAX, - None, - ) + .query::(&*LOGGER, &self.conn, query) .expect("layout.query failed to execute query") .0; diff --git a/store/postgres/tests/relational_bytes.rs b/store/postgres/tests/relational_bytes.rs index 56fe6eb3f4d..663084a7100 100644 --- a/store/postgres/tests/relational_bytes.rs +++ b/store/postgres/tests/relational_bytes.rs @@ -3,17 +3,20 @@ use diesel::connection::SimpleConnection as _; use diesel::pg::PgConnection; use graph::components::store::EntityKey; use graph::data::store::scalar; +use graph::data_source::CausalityRegion; +use graph::prelude::EntityQuery; use graph_mock::MockMetricsRegistry; use hex_literal::hex; use lazy_static::lazy_static; use std::borrow::Cow; +use std::collections::BTreeSet; use std::str::FromStr; use std::{collections::BTreeMap, sync::Arc}; use graph::prelude::{ o, slog, web3::types::H256, AttributeNames, ChildMultiplicity, DeploymentHash, Entity, - EntityCollection, EntityLink, EntityOrder, EntityRange, EntityWindow, Logger, ParentLink, - Schema, StopwatchMetrics, Value, WindowAttribute, BLOCK_NUMBER_MAX, + EntityCollection, EntityLink, EntityWindow, Logger, ParentLink, Schema, StopwatchMetrics, + Value, WindowAttribute, BLOCK_NUMBER_MAX, }; use graph::{ components::store::EntityType, @@ -94,7 +97,7 @@ fn insert_entity(conn: &PgConnection, layout: &Layout, entity_type: &str, entity let errmsg = format!("Failed to insert entity {}[{}]", entity_type, key.entity_id); layout .insert( - &conn, + conn, &entity_type, entities.as_mut_slice(), 0, @@ -126,7 +129,7 @@ fn create_schema(conn: &PgConnection) -> Layout { NAMESPACE.clone(), NETWORK_NAME.to_string(), ); - Layout::create_relational_schema(&conn, Arc::new(site), &schema) + Layout::create_relational_schema(conn, Arc::new(site), &schema, BTreeSet::new()) .expect("Failed to create relational schema") } @@ -172,7 +175,7 @@ macro_rules! 
assert_entity_eq { fn run_test(test: F) where - F: FnOnce(&PgConnection, &Layout) -> (), + F: FnOnce(&PgConnection, &Layout), { run_test_with_conn(|conn| { // Reset state before starting @@ -192,7 +195,11 @@ fn bad_id() { // We test that we get errors for various strings that are not // valid 'Bytes' strings; we use `find` to force the conversion // from String -> Bytes internally - let res = layout.find(conn, &*THING, "bad", BLOCK_NUMBER_MAX); + let res = layout.find( + conn, + &EntityKey::data(THING.as_str(), "bad"), + BLOCK_NUMBER_MAX, + ); assert!(res.is_err()); assert_eq!( "store error: Odd number of digits", @@ -200,7 +207,11 @@ fn bad_id() { ); // We do not allow the `\x` prefix that Postgres uses - let res = layout.find(conn, &*THING, "\\xbadd", BLOCK_NUMBER_MAX); + let res = layout.find( + conn, + &EntityKey::data(THING.as_str(), "\\xbadd"), + BLOCK_NUMBER_MAX, + ); assert!(res.is_err()); assert_eq!( "store error: Invalid character \'\\\\\' at position 0", @@ -208,11 +219,19 @@ fn bad_id() { ); // Having the '0x' prefix is ok - let res = layout.find(conn, &*THING, "0xbadd", BLOCK_NUMBER_MAX); + let res = layout.find( + conn, + &EntityKey::data(THING.as_str(), "0xbadd"), + BLOCK_NUMBER_MAX, + ); assert!(res.is_ok()); // Using non-hex characters is also bad - let res = layout.find(conn, &*THING, "nope", BLOCK_NUMBER_MAX); + let res = layout.find( + conn, + &EntityKey::data(THING.as_str(), "nope"), + BLOCK_NUMBER_MAX, + ); assert!(res.is_err()); assert_eq!( "store error: Invalid character \'n\' at position 0", @@ -226,18 +245,23 @@ fn find() { run_test(|conn, layout| { const ID: &str = "deadbeef"; const NAME: &str = "Beef"; - insert_thing(&conn, &layout, ID, NAME); + insert_thing(conn, layout, ID, NAME); // Happy path: find existing entity let entity = layout - .find(conn, &*THING, ID, BLOCK_NUMBER_MAX) + .find(conn, &EntityKey::data(THING.as_str(), ID), BLOCK_NUMBER_MAX) .expect("Failed to read Thing[deadbeef]") .unwrap(); assert_entity_eq!(scrub(&*BEEF_ENTITY), entity); + assert!(CausalityRegion::from_entity(&entity) == CausalityRegion::ONCHAIN); // Find non-existing entity let entity = layout - .find(conn, &*THING, "badd", BLOCK_NUMBER_MAX) + .find( + conn, + &EntityKey::data(THING.as_str(), "badd"), + BLOCK_NUMBER_MAX, + ) .expect("Failed to read Thing[badd]"); assert!(entity.is_none()); }); @@ -250,49 +274,58 @@ fn find_many() { const NAME: &str = "Beef"; const ID2: &str = "0xdeadbeef02"; const NAME2: &str = "Moo"; - insert_thing(&conn, &layout, ID, NAME); - insert_thing(&conn, &layout, ID2, NAME2); + insert_thing(conn, layout, ID, NAME); + insert_thing(conn, layout, ID2, NAME2); - let mut id_map: BTreeMap<&EntityType, Vec<&str>> = BTreeMap::default(); - id_map.insert(&*THING, vec![ID, ID2, "badd"]); + let mut id_map = BTreeMap::default(); + id_map.insert( + (THING.clone(), CausalityRegion::ONCHAIN), + vec![ID.to_string(), ID2.to_string(), "badd".to_string()], + ); let entities = layout .find_many(conn, &id_map, BLOCK_NUMBER_MAX) .expect("Failed to read many things"); - assert_eq!(1, entities.len()); - - let ids = entities - .get(&*THING) - .expect("We got some things") - .iter() - .map(|thing| thing.id().unwrap()) - .collect::>(); - - assert_eq!(2, ids.len()); - assert!(ids.contains(&ID.to_owned()), "Missing ID"); - assert!(ids.contains(&ID2.to_owned()), "Missing ID2"); + assert_eq!(2, entities.len()); + + let id_key = EntityKey { + entity_id: ID.into(), + entity_type: THING.clone(), + causality_region: CausalityRegion::ONCHAIN, + }; + let id2_key = EntityKey { + entity_id: 
ID2.into(), + entity_type: THING.clone(), + causality_region: CausalityRegion::ONCHAIN, + }; + assert!(entities.contains_key(&id_key), "Missing ID"); + assert!(entities.contains_key(&id2_key), "Missing ID2"); }); } #[test] fn update() { run_test(|conn, layout| { - insert_entity(&conn, &layout, "Thing", BEEF_ENTITY.clone()); + insert_entity(conn, layout, "Thing", BEEF_ENTITY.clone()); // Update the entity let mut entity = BEEF_ENTITY.clone(); entity.set("name", "Moo"); - let key = EntityKey::data("Thing".to_owned(), entity.id().unwrap().clone()); + let key = EntityKey::data("Thing".to_owned(), entity.id().unwrap()); - let entity_id = entity.id().unwrap().clone(); + let entity_id = entity.id().unwrap(); let entity_type = key.entity_type.clone(); let mut entities = vec![(&key, Cow::from(&entity))]; layout - .update(&conn, &entity_type, &mut entities, 1, &MOCK_STOPWATCH) + .update(conn, &entity_type, &mut entities, 1, &MOCK_STOPWATCH) .expect("Failed to update"); let actual = layout - .find(conn, &*THING, &entity_id, BLOCK_NUMBER_MAX) + .find( + conn, + &EntityKey::data(THING.as_str(), &entity_id), + BLOCK_NUMBER_MAX, + ) .expect("Failed to read Thing[deadbeef]") .unwrap(); @@ -305,17 +338,17 @@ fn delete() { run_test(|conn, layout| { const TWO_ID: &str = "deadbeef02"; - insert_entity(&conn, &layout, "Thing", BEEF_ENTITY.clone()); + insert_entity(conn, layout, "Thing", BEEF_ENTITY.clone()); let mut two = BEEF_ENTITY.clone(); two.set("id", TWO_ID); - insert_entity(&conn, &layout, "Thing", two); + insert_entity(conn, layout, "Thing", two); // Delete where nothing is getting deleted let key = EntityKey::data("Thing".to_owned(), "ffff".to_owned()); let entity_type = key.entity_type.clone(); let mut entity_keys = vec![key.entity_id.as_str()]; let count = layout - .delete(&conn, &entity_type, &entity_keys, 1, &MOCK_STOPWATCH) + .delete(conn, &entity_type, &entity_keys, 1, &MOCK_STOPWATCH) .expect("Failed to delete"); assert_eq!(0, count); @@ -325,7 +358,7 @@ fn delete() { .map(|key| *key = TWO_ID) .expect("Failed to update entity types"); let count = layout - .delete(&conn, &entity_type, &entity_keys, 1, &MOCK_STOPWATCH) + .delete(conn, &entity_type, &entity_keys, 1, &MOCK_STOPWATCH) .expect("Failed to delete"); assert_eq!(1, count); }); @@ -381,25 +414,18 @@ fn make_thing_tree(conn: &PgConnection, layout: &Layout) -> (Entity, Entity, Ent insert_entity(conn, layout, "Thing", root.clone()); insert_entity(conn, layout, "Thing", child1.clone()); insert_entity(conn, layout, "Thing", child2.clone()); - insert_entity(conn, layout, "Thing", grand_child1.clone()); - insert_entity(conn, layout, "Thing", grand_child2.clone()); + insert_entity(conn, layout, "Thing", grand_child1); + insert_entity(conn, layout, "Thing", grand_child2); (root, child1, child2) } #[test] fn query() { fn fetch(conn: &PgConnection, layout: &Layout, coll: EntityCollection) -> Vec { + let id = DeploymentHash::new("QmXW3qvxV7zXnwRntpj7yoK8HZVtaraZ67uMqaLRvXdxha").unwrap(); + let query = EntityQuery::new(id, BLOCK_NUMBER_MAX, coll).first(10); layout - .query::( - &*LOGGER, - conn, - coll, - None, - EntityOrder::Default, - EntityRange::first(10), - BLOCK_NUMBER_MAX, - None, - ) + .query::(&*LOGGER, conn, query) .map(|(entities, _)| entities) .expect("the query succeeds") .into_iter() diff --git a/store/postgres/tests/store.rs b/store/postgres/tests/store.rs index 156c826ac90..c82b347acfb 100644 --- a/store/postgres/tests/store.rs +++ b/store/postgres/tests/store.rs @@ -170,7 +170,7 @@ async fn insert_test_data(store: Arc) -> 
DeploymentLocator }; // Create SubgraphDeploymentEntity - let deployment = DeploymentCreate::new(&manifest, None); + let deployment = DeploymentCreate::new(String::new(), &manifest, None); let name = SubgraphName::new("test/store").unwrap(); let node_id = NodeId::new("test").unwrap(); let deployment = store @@ -1296,7 +1296,8 @@ fn entity_changes_are_fired_and_forwarded_to_subscriptions() { chain: PhantomData, }; - let deployment = DeploymentCreate::new(&manifest, Some(TEST_BLOCK_0_PTR.clone())); + let deployment = + DeploymentCreate::new(String::new(), &manifest, Some(TEST_BLOCK_0_PTR.clone())); let name = SubgraphName::new("test/entity-changes-are-fired").unwrap(); let node_id = NodeId::new("test").unwrap(); let deployment = store @@ -2027,6 +2028,39 @@ fn parse_timestamp() { }) } +#[test] +fn parse_timestamp_firehose() { + const EXPECTED_TS: u64 = 1657712166; + + run_test(|store, _, _| async move { + use block_store::*; + // The test subgraph is at block 2. Since we don't ever delete + // the genesis block, the only block eligible for cleanup is BLOCK_ONE + // and the first retained block is block 2. + block_store::set_chain( + vec![ + &*GENESIS_BLOCK, + &*BLOCK_ONE, + &*BLOCK_TWO, + &*BLOCK_THREE_TIMESTAMP_FIREHOSE, + ], + NETWORK_NAME, + ); + let chain_store = store + .block_store() + .chain_store(NETWORK_NAME) + .expect("fake chain store"); + + let (_network, number, timestamp) = chain_store + .block_number(&BLOCK_THREE_TIMESTAMP_FIREHOSE.block_hash()) + .await + .expect("block_number to return correct number and timestamp") + .unwrap(); + assert_eq!(number, 3); + assert_eq!(timestamp.unwrap(), EXPECTED_TS); + }) +} + #[test] /// checks if retrieving the timestamp from the data blob works. /// on ethereum, the block has timestamp as U256 so it will always have a value diff --git a/store/postgres/tests/subgraph.rs b/store/postgres/tests/subgraph.rs index 12d6e2c9be7..b24bad2d8f7 100644 --- a/store/postgres/tests/subgraph.rs +++ b/store/postgres/tests/subgraph.rs @@ -21,7 +21,6 @@ use graph::{ }; use graph_store_postgres::layout_for_tests::Connection as Primary; use graph_store_postgres::SubgraphStore; - use std::{collections::HashSet, marker::PhantomData, sync::Arc}; use test_store::*; @@ -114,7 +113,7 @@ fn create_subgraph() { // Return the versions (not deployments) for a subgraph fn subgraph_versions(primary: &Primary) -> (Option, Option) { - primary.versions_for_subgraph(&*SUBGRAPH_NAME).unwrap() + primary.versions_for_subgraph(SUBGRAPH_NAME).unwrap() } /// Return the deployment for the current and the pending version of the @@ -137,7 +136,7 @@ fn create_subgraph() { let schema = Schema::parse(SUBGRAPH_GQL, id.clone()).unwrap(); let manifest = SubgraphManifest:: { - id: id.clone(), + id: id, spec_version: Version::new(1, 0, 0), features: Default::default(), description: None, @@ -148,7 +147,7 @@ fn create_subgraph() { templates: vec![], chain: PhantomData, }; - let deployment = DeploymentCreate::new(&manifest, None); + let deployment = DeploymentCreate::new(String::new(), &manifest, None); let node_id = NodeId::new("left").unwrap(); let (deployment, events) = tap_store_events(|| { @@ -165,8 +164,7 @@ fn create_subgraph() { }); let events = events .into_iter() - .map(|event| event.changes.into_iter()) - .flatten() + .flat_map(|event| event.changes.into_iter()) .collect(); (deployment, events) } @@ -463,7 +461,7 @@ fn version_info() { .unwrap(); let vi = get_version_info(&store, NAME); - assert_eq!(&*NAME, vi.deployment_id.as_str()); + assert_eq!(NAME, vi.deployment_id.as_str()); 
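// Editorial aside: nearly every hunk in this file is the same `&*NAME` ->
// `NAME` cleanup inside one repeated four-line check. A hypothetical helper
// (not part of this diff; the `VersionInfo` and `BlockNumber` import paths
// are assumptions about `graph`'s store API) makes the repeated pattern
// explicit and sidesteps the `assert_eq!(true, ...)` style clippy dislikes:

use graph::components::store::{BlockNumber, VersionInfo}; // assumed paths

fn assert_version_state(vi: &VersionInfo, name: &str, failed: bool, latest: Option<BlockNumber>) {
    assert_eq!(name, vi.deployment_id.as_str());
    assert_eq!(failed, vi.failed);
    assert_eq!(latest, vi.latest_ethereum_block_number);
}

// Each of the checks above and below would then shrink to one line, e.g.
// `assert_version_state(&get_version_info(&store, NAME), NAME, false, Some(1));`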
assert_eq!(false, vi.synced); assert_eq!(false, vi.failed); assert_eq!( @@ -474,9 +472,9 @@ fn version_info() { Some("repo for versionInfoSubgraph"), vi.repository.as_deref() ); - assert_eq!(&*NAME, vi.schema.id.as_str()); + assert_eq!(NAME, vi.schema.id.as_str()); assert_eq!(Some(1), vi.latest_ethereum_block_number); - assert_eq!(&*NETWORK_NAME, vi.network.as_str()); + assert_eq!(NETWORK_NAME, vi.network.as_str()); // We set the head for the network to null in the test framework assert_eq!(None, vi.total_ethereum_blocks_count); }) @@ -638,7 +636,7 @@ fn fail_unfail_deterministic_error() { .await .unwrap()); let vi = get_version_info(&store, NAME); - assert_eq!(&*NAME, vi.deployment_id.as_str()); + assert_eq!(NAME, vi.deployment_id.as_str()); assert_eq!(false, vi.failed); assert_eq!(Some(0), vi.latest_ethereum_block_number); @@ -658,7 +656,7 @@ fn fail_unfail_deterministic_error() { .await .unwrap()); let vi = get_version_info(&store, NAME); - assert_eq!(&*NAME, vi.deployment_id.as_str()); + assert_eq!(NAME, vi.deployment_id.as_str()); assert_eq!(false, vi.failed); assert_eq!(Some(1), vi.latest_ethereum_block_number); @@ -685,7 +683,7 @@ fn fail_unfail_deterministic_error() { .await .unwrap()); let vi = get_version_info(&store, NAME); - assert_eq!(&*NAME, vi.deployment_id.as_str()); + assert_eq!(NAME, vi.deployment_id.as_str()); assert_eq!(true, vi.failed); assert_eq!(Some(1), vi.latest_ethereum_block_number); @@ -702,7 +700,7 @@ fn fail_unfail_deterministic_error() { .await .unwrap()); let vi = get_version_info(&store, NAME); - assert_eq!(&*NAME, vi.deployment_id.as_str()); + assert_eq!(NAME, vi.deployment_id.as_str()); assert_eq!(false, vi.failed); assert_eq!(Some(0), vi.latest_ethereum_block_number); @@ -741,7 +739,7 @@ fn fail_unfail_deterministic_error_noop() { // We don't have any errors and the subgraph is healthy. assert_eq!(count(), 0); let vi = get_version_info(&store, NAME); - assert_eq!(&*NAME, vi.deployment_id.as_str()); + assert_eq!(NAME, vi.deployment_id.as_str()); assert_eq!(false, vi.failed); assert_eq!(Some(0), vi.latest_ethereum_block_number); @@ -758,7 +756,7 @@ fn fail_unfail_deterministic_error_noop() { // Still no fatal errors. assert_eq!(count(), 0); let vi = get_version_info(&store, NAME); - assert_eq!(&*NAME, vi.deployment_id.as_str()); + assert_eq!(NAME, vi.deployment_id.as_str()); assert_eq!(false, vi.failed); assert_eq!(Some(1), vi.latest_ethereum_block_number); @@ -778,7 +776,7 @@ fn fail_unfail_deterministic_error_noop() { assert_eq!(outcome, UnfailOutcome::Noop); assert_eq!(count(), 0); let vi = get_version_info(&store, NAME); - assert_eq!(&*NAME, vi.deployment_id.as_str()); + assert_eq!(NAME, vi.deployment_id.as_str()); assert_eq!(false, vi.failed); assert_eq!(Some(1), vi.latest_ethereum_block_number); @@ -796,7 +794,7 @@ fn fail_unfail_deterministic_error_noop() { // Now we have a fatal error because the subgraph failed. 
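// Editorial aside — a sketch of the contract the fail/unfail tests here pin
// down. This is an assumed summary, not code from this diff; `UnfailOutcome`
// and the `deterministic` flag on `SubgraphError` are real names from
// `graph`, the import path and the exact conditions are approximations:

use graph::components::store::UnfailOutcome; // assumed import path

fn expected_unfail_outcome(
    via_deterministic_path: bool, // which of the two unfail methods ran
    err_is_deterministic: bool,
    at_failure_block: bool,
) -> UnfailOutcome {
    if via_deterministic_path {
        // unfail_deterministic_error: rewinds the deployment one block and
        // deletes the error row, but only for a deterministic failure at the
        // block the deployment currently points at; anything else is a Noop.
        if err_is_deterministic && at_failure_block {
            UnfailOutcome::Unfailed
        } else {
            UnfailOutcome::Noop
        }
    } else {
        // unfail_non_deterministic_error: clears a non-deterministic fatal
        // error in place; the block pointer stays where it is.
        if !err_is_deterministic {
            UnfailOutcome::Unfailed
        } else {
            UnfailOutcome::Noop
        }
    }
}

// As the comment just above says, at this point the test expects the
// failure to be recorded: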
assert_eq!(count(), 1); let vi = get_version_info(&store, NAME); - assert_eq!(&*NAME, vi.deployment_id.as_str()); + assert_eq!(NAME, vi.deployment_id.as_str()); assert_eq!(true, vi.failed); assert_eq!(Some(1), vi.latest_ethereum_block_number); @@ -811,7 +809,7 @@ assert_eq!(outcome, UnfailOutcome::Noop); assert_eq!(count(), 1); let vi = get_version_info(&store, NAME); - assert_eq!(&*NAME, vi.deployment_id.as_str()); + assert_eq!(NAME, vi.deployment_id.as_str()); assert_eq!(true, vi.failed); assert_eq!(Some(1), vi.latest_ethereum_block_number); @@ -838,7 +836,7 @@ assert_eq!(outcome, UnfailOutcome::Noop); assert_eq!(count(), 2); let vi = get_version_info(&store, NAME); - assert_eq!(&*NAME, vi.deployment_id.as_str()); + assert_eq!(NAME, vi.deployment_id.as_str()); assert_eq!(true, vi.failed); assert_eq!(Some(1), vi.latest_ethereum_block_number); @@ -877,7 +875,7 @@ fn fail_unfail_non_deterministic_error() { // We don't have any errors. assert_eq!(count(), 0); let vi = get_version_info(&store, NAME); - assert_eq!(&*NAME, vi.deployment_id.as_str()); + assert_eq!(NAME, vi.deployment_id.as_str()); assert_eq!(false, vi.failed); assert_eq!(Some(0), vi.latest_ethereum_block_number); @@ -901,7 +899,7 @@ // Now we have a fatal error because the subgraph failed. assert_eq!(count(), 1); let vi = get_version_info(&store, NAME); - assert_eq!(&*NAME, vi.deployment_id.as_str()); + assert_eq!(NAME, vi.deployment_id.as_str()); assert_eq!(true, vi.failed); assert_eq!(Some(0), vi.latest_ethereum_block_number); @@ -918,7 +916,7 @@ // Subgraph failed but its deployment head pointer advanced. assert_eq!(count(), 1); let vi = get_version_info(&store, NAME); - assert_eq!(&*NAME, vi.deployment_id.as_str()); + assert_eq!(NAME, vi.deployment_id.as_str()); assert_eq!(true, vi.failed); assert_eq!(Some(1), vi.latest_ethereum_block_number); @@ -929,7 +927,7 @@ assert_eq!(outcome, UnfailOutcome::Unfailed); assert_eq!(count(), 0); let vi = get_version_info(&store, NAME); - assert_eq!(&*NAME, vi.deployment_id.as_str()); + assert_eq!(NAME, vi.deployment_id.as_str()); assert_eq!(false, vi.failed); assert_eq!(Some(1), vi.latest_ethereum_block_number); @@ -968,7 +966,7 @@ fn fail_unfail_non_deterministic_error_noop() { // We don't have any errors and the subgraph is healthy. assert_eq!(count(), 0); let vi = get_version_info(&store, NAME); - assert_eq!(&*NAME, vi.deployment_id.as_str()); + assert_eq!(NAME, vi.deployment_id.as_str()); assert_eq!(false, vi.failed); assert_eq!(Some(0), vi.latest_ethereum_block_number); @@ -985,7 +983,7 @@ // Still no errors.
assert_eq!(count(), 0); let vi = get_version_info(&store, NAME); - assert_eq!(&*NAME, vi.deployment_id.as_str()); + assert_eq!(NAME, vi.deployment_id.as_str()); assert_eq!(false, vi.failed); assert_eq!(Some(1), vi.latest_ethereum_block_number); @@ -1002,7 +1000,7 @@ fn fail_unfail_non_deterministic_error_noop() { assert_eq!(outcome, UnfailOutcome::Noop); assert_eq!(count(), 0); let vi = get_version_info(&store, NAME); - assert_eq!(&*NAME, vi.deployment_id.as_str()); + assert_eq!(NAME, vi.deployment_id.as_str()); assert_eq!(false, vi.failed); assert_eq!(Some(1), vi.latest_ethereum_block_number); @@ -1020,7 +1018,7 @@ fn fail_unfail_non_deterministic_error_noop() { // We now have a fatal error because the subgraph failed. assert_eq!(count(), 1); let vi = get_version_info(&store, NAME); - assert_eq!(&*NAME, vi.deployment_id.as_str()); + assert_eq!(NAME, vi.deployment_id.as_str()); assert_eq!(true, vi.failed); assert_eq!(Some(1), vi.latest_ethereum_block_number); @@ -1031,7 +1029,7 @@ fn fail_unfail_non_deterministic_error_noop() { assert_eq!(outcome, UnfailOutcome::Noop); assert_eq!(count(), 1); let vi = get_version_info(&store, NAME); - assert_eq!(&*NAME, vi.deployment_id.as_str()); + assert_eq!(NAME, vi.deployment_id.as_str()); assert_eq!(true, vi.failed); assert_eq!(Some(1), vi.latest_ethereum_block_number); @@ -1053,7 +1051,7 @@ fn fail_unfail_non_deterministic_error_noop() { assert_eq!(outcome, UnfailOutcome::Noop); assert_eq!(count(), 2); let vi = get_version_info(&store, NAME); - assert_eq!(&*NAME, vi.deployment_id.as_str()); + assert_eq!(NAME, vi.deployment_id.as_str()); assert_eq!(true, vi.failed); assert_eq!(Some(1), vi.latest_ethereum_block_number); diff --git a/store/postgres/tests/writable.rs b/store/postgres/tests/writable.rs index 25e9711facc..85558ebd229 100644 --- a/store/postgres/tests/writable.rs +++ b/store/postgres/tests/writable.rs @@ -47,7 +47,7 @@ async fn insert_test_data(store: Arc) -> DeploymentLocator }; // Create SubgraphDeploymentEntity - let deployment = DeploymentCreate::new(&manifest, None); + let deployment = DeploymentCreate::new(String::new(), &manifest, None); let name = SubgraphName::new("test/writable").unwrap(); let node_id = NodeId::new("test").unwrap(); let deployment = store diff --git a/store/test-store/Cargo.toml b/store/test-store/Cargo.toml index de9dbe691cb..0c7c55bc662 100644 --- a/store/test-store/Cargo.toml +++ b/store/test-store/Cargo.toml @@ -1,8 +1,8 @@ [package] name = "test-store" -version = "0.27.0" +version.workspace = true +edition.workspace = true authors = ["Leonardo Yvens "] -edition = "2021" description = "Provides static store instance for tests." 
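# Editorial aside: `version.workspace = true` and `edition.workspace = true`
# above only resolve if the workspace root manifest defines a
# `[workspace.package]` table. A sketch of the root entries this relies on
# (values are illustrative assumptions, not taken from this diff):
#
#     [workspace.package]
#     version = "0.28.0"
#     edition = "2021"
#
# The same mechanism backs `prost-types = { workspace = true }` further down,
# which needs a matching entry under `[workspace.dependencies]` in the root
# manifest, e.g. `prost-types = "0.11"`.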
[dependencies] @@ -12,7 +12,9 @@ graph-mock = { path = "../../mock" } graph-node = { path = "../../node" } graph = { path = "../../graph" } graph-store-postgres = { path = "../postgres" } +graph-chain-ethereum= { path = "../../chain/ethereum" } lazy_static = "1.1" hex-literal = "0.3" diesel = { version = "1.4.8", features = ["postgres", "serde_json", "numeric", "r2d2"] } serde = "1.0" +prost-types = { workspace = true } diff --git a/store/test-store/devel/docker-compose.yml b/store/test-store/devel/docker-compose.yml index 5a7d12cae76..a42bec3854f 100644 --- a/store/test-store/devel/docker-compose.yml +++ b/store/test-store/devel/docker-compose.yml @@ -15,6 +15,7 @@ services: POSTGRES_USER: graph-node POSTGRES_PASSWORD: let-me-in POSTGRES_DB: graph-node + POSTGRES_INITDB_ARGS: "-E UTF8 --locale=C" volumes: - ./data/postgres:/var/lib/postgresql/data - ./initdb.d:/docker-entrypoint-initdb.d diff --git a/store/test-store/src/block_store.rs b/store/test-store/src/block_store.rs index dafb2aebed5..fc3e40d08c8 100644 --- a/store/test-store/src/block_store.rs +++ b/store/test-store/src/block_store.rs @@ -4,12 +4,14 @@ use lazy_static::lazy_static; use graph::components::store::BlockStore; use graph::{ - blockchain::Block, + blockchain::Block as BlockchainBlock, prelude::{ serde_json, web3::types::H256, web3::types::U256, BlockHash, BlockNumber, BlockPtr, EthereumBlock, LightEthereumBlock, }, }; +use graph_chain_ethereum::codec::{Block, BlockHeader}; +use prost_types::Timestamp; lazy_static! { // Genesis block @@ -33,6 +35,7 @@ lazy_static! { pub static ref BLOCK_THREE: FakeBlock = BLOCK_TWO.make_child("7347afe69254df06729e123610b00b8b11f15cfae3241f9366fb113aec07489c", None); pub static ref BLOCK_THREE_NO_PARENT: FakeBlock = FakeBlock::make_no_parent(3, "fa9ebe3f74de4c56908b49f5c4044e85825f7350f3fa08a19151de82a82a7313"); pub static ref BLOCK_THREE_TIMESTAMP: FakeBlock = BLOCK_TWO.make_child("6b834521bb753c132fdcf0e1034803ed9068e324112f8750ba93580b393a986b", Some(U256::from(1657712166))); + pub static ref BLOCK_THREE_TIMESTAMP_FIREHOSE: FakeBlock = BLOCK_TWO.make_child("6b834521bb753c132fdcf0e1034803ed9068e324112f8750ba93580b393a986f", Some(U256::from(1657712166))); // This block is special and serializes in a slightly different way, this is needed to simulate non-ethereum behaviour at the store level. If you're not sure // what you are doing, don't use this block for other tests. 
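// Editorial aside on the U256 -> protobuf conversion in `as_firehose_block`
// below: the fake block keeps its timestamp as a `web3::types::U256`, while
// the firehose `BlockHeader` wants a `prost_types::Timestamp` with `i64`
// seconds. The diff gets there via a decimal-string round-trip
// (`i64::from_str_radix(&ts.to_string(), 10)`); a sketch of the more direct
// form, assuming — as holds for block timestamps — that the value fits in
// 64 bits:

use graph::prelude::web3::types::U256;
use prost_types::Timestamp;

fn to_proto_timestamp(ts: U256) -> Timestamp {
    Timestamp {
        // `as_u64` panics when the value needs more than 64 bits, much as
        // the `unwrap` in the round-trip version would fail on overflow.
        seconds: ts.as_u64() as i64,
        nanos: 0,
    }
}

// The special-cased `BLOCK_THREE_NO_TIMESTAMP` block that the comment above
// warns about follows.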
pub static ref BLOCK_THREE_NO_TIMESTAMP: FakeBlock = BLOCK_TWO.make_child("6b834521bb753c132fdcf0e1034803ed9068e324112f8750ba93580b393a986b", None); @@ -96,9 +99,25 @@ impl FakeBlock { transaction_receipts: Vec::new(), } } + + pub fn as_firehose_block(&self) -> Block { + let mut block = Block::default(); + block.hash = self.hash.clone().into_bytes(); + block.number = self.number as u64; + + let mut header = BlockHeader::default(); + header.parent_hash = self.parent_hash.clone().into_bytes(); + header.timestamp = self.timestamp.map(|ts| Timestamp { + seconds: i64::from_str_radix(&ts.to_string(), 10).unwrap(), + nanos: 0, + }); + block.header = Some(header); + + block + } } -impl Block for FakeBlock { +impl BlockchainBlock for FakeBlock { fn ptr(&self) -> BlockPtr { self.block_ptr() } @@ -115,7 +134,12 @@ impl Block for FakeBlock { } fn data(&self) -> Result { - let mut value: serde_json::Value = serde_json::to_value(self.as_ethereum_block())?; + let mut value: serde_json::Value = if self.eq(&BLOCK_THREE_TIMESTAMP_FIREHOSE) { + self.as_firehose_block().data().unwrap() + } else { + serde_json::to_value(self.as_ethereum_block())? + }; + if !self.eq(&BLOCK_THREE_NO_TIMESTAMP) { return Ok(value); }; @@ -145,6 +169,9 @@ pub fn set_chain(chain: FakeBlockList, network: &str) { .block_store() .chain_store(network) .unwrap(); - let chain: Vec<&dyn Block> = chain.iter().map(|block| *block as &dyn Block).collect(); + let chain: Vec<&dyn BlockchainBlock> = chain + .iter() + .map(|block| *block as &dyn BlockchainBlock) + .collect(); store.set_chain(&GENESIS_BLOCK.hash, chain); } diff --git a/store/test-store/src/store.rs b/store/test-store/src/store.rs index c1158da0b2a..513af19aca6 100644 --- a/store/test-store/src/store.rs +++ b/store/test-store/src/store.rs @@ -3,6 +3,7 @@ use graph::data::graphql::effort::LoadManager; use graph::data::query::QueryResults; use graph::data::query::QueryTarget; use graph::data::subgraph::schema::{DeploymentCreate, SubgraphError}; +use graph::data_source::CausalityRegion; use graph::log; use graph::prelude::{QueryStoreManager as _, SubgraphStore as _, *}; use graph::semver::Version; @@ -63,7 +64,7 @@ lazy_static! { static ref CONFIG: Config = STORE_POOL_CONFIG.2.clone(); pub static ref SUBSCRIPTION_MANAGER: Arc = STORE_POOL_CONFIG.3.clone(); pub static ref NODE_ID: NodeId = NodeId::new("test").unwrap(); - static ref SUBGRAPH_STORE: Arc = STORE.subgraph_store(); + pub static ref SUBGRAPH_STORE: Arc = STORE.subgraph_store(); static ref BLOCK_STORE: Arc = STORE.block_store(); pub static ref GENESIS_PTR: BlockPtr = ( H256::from(hex!( @@ -122,7 +123,7 @@ where /// Run a test with a connection into the primary database, not a full store pub fn run_test_with_conn(test: F) where - F: FnOnce(&PgConnection) -> (), + F: FnOnce(&PgConnection), { // Lock regardless of poisoning. This also forces sequential test execution. let _lock = match SEQ_LOCK.lock() { @@ -167,7 +168,10 @@ pub async fn create_subgraph( chain: PhantomData, }; - let deployment = DeploymentCreate::new(&manifest, None).graft(base); + let mut yaml = serde_yaml::Mapping::new(); + yaml.insert("dataSources".into(), Vec::::new().into()); + let yaml = serde_yaml::to_string(&yaml).unwrap(); + let deployment = DeploymentCreate::new(yaml, &manifest, None).graft(base); let name = { let mut name = subgraph_id.to_string(); name.truncate(32); @@ -223,7 +227,7 @@ pub async fn transact_errors( ); store .subgraph_store() - .writable(LOGGER.clone(), deployment.id.clone()) + .writable(LOGGER.clone(), deployment.id) .await? 
.transact_block_operations( block_ptr_to, @@ -350,8 +354,9 @@ pub async fn insert_entities( .into_iter() .map(|(entity_type, data)| EntityOperation::Set { key: EntityKey { - entity_type: entity_type.to_owned(), + entity_type, entity_id: data.get("id").unwrap().clone().as_string().unwrap().into(), + causality_region: CausalityRegion::ONCHAIN, }, data, }); @@ -443,6 +448,7 @@ async fn execute_subgraph_query_internal( ) .unwrap(); let network = Some(status[0].chains[0].network.clone()); + let trace = query.trace; let query = return_err!(PreparedQuery::new( &logger, schema, @@ -456,10 +462,7 @@ async fn execute_subgraph_query_internal( let deployment = query.schema.id().clone(); let store = STORE .clone() - .query_store( - QueryTarget::Deployment(deployment.into(), version.clone()), - false, - ) + .query_store(QueryTarget::Deployment(deployment, version.clone()), false) .await .unwrap(); let state = store.deployment_state().await.unwrap(); @@ -489,6 +492,7 @@ async fn execute_subgraph_query_internal( load_manager: LOAD_MANAGER.clone(), max_first: std::u32::MAX, max_skip: std::u32::MAX, + trace, }, ) .await, @@ -542,7 +546,7 @@ fn build_store() -> (Arc, ConnectionPool, Config, Arc { - try { - return execSync(cmd, { cwd: srcDir, stdio: "inherit" }); - } catch (e) { - throw new Error(`Failed to run command \`${cmd}\``); - } -}; - -const waitForSubgraphToFailWithError = async (blockNumber) => - new Promise((resolve, reject) => { - let deadline = Date.now() + 60 * 1000; - - const checkSubgraphFailed = async () => { - try { - let result = await fetchSubgraphs({ - query: `{ - indexingStatusForCurrentVersion(subgraphName: "test/fatal-error") { - health - entityCount - fatalError { - block { - number - } - deterministic - } - - # Test that non-fatal errors can be queried - nonFatalErrors { - handler - } - - # Test that the last healthy block can be queried - chains { - lastHealthyBlock { - number - } - } - } - }`, - }); - - if (result.errors != null) { - reject("query contains errors: " + JSON.stringify(result.errors)); - } - - let status = result.data.indexingStatusForCurrentVersion; - if (status.health == "failed") { - if (status.fatalError.block.number != blockNumber || status.fatalError.deterministic != true) { - reject( - new Error( - "Subgraph failed with unexpected block number: " + - status.fatalError.block.number - ) - ); - } else if (status.entityCount != 1) { - return reject(new Error("There should be only one entity saved in the database, the Proof of Indexing")); - } else { - resolve(); - } - } else { - throw new Error("reject or retry"); - } - } catch (e) { - if (Date.now() > deadline) { - reject(new Error(`Timed out waiting for the subgraph to fail`)); - } else { - setTimeout(checkSubgraphFailed, 500); - } - } - }; - - setTimeout(checkSubgraphFailed, 0); - }); - -contract("Contract", (accounts) => { - // Deploy the subgraph once before all tests - before(async () => { - // Deploy the contract - const contract = await Contract.deployed(); - - // Insert its address into subgraph manifest - await patching.replace( - path.join(srcDir, "subgraph.yaml"), - "0x0000000000000000000000000000000000000000", - contract.address - ); - - // Create and deploy the subgraph - exec("yarn codegen"); - exec(`yarn create:test`); - exec(`yarn deploy:test`); - }); - - it("subgraph fails with expected error", async () => { - await waitForSubgraphToFailWithError(3); - }); -}); diff --git a/tests/integration-tests/fatal-error/truffle.js b/tests/integration-tests/fatal-error/truffle.js deleted file mode 100644 
index 55e43ccf6a4..00000000000 --- a/tests/integration-tests/fatal-error/truffle.js +++ /dev/null @@ -1,22 +0,0 @@ -require("babel-register"); -require("babel-polyfill"); - -module.exports = { - contracts_directory: "../common", - migrations_directory: "../common", - contracts_build_directory: "./truffle_output", - networks: { - test: { - host: "localhost", - port: process.env.GANACHE_TEST_PORT || 18545, - network_id: "*", - gas: "100000000000", - gasPrice: "1", - }, - }, - compilers: { - solc: { - version: "0.8.2" - }, - }, -}; diff --git a/tests/integration-tests/file-data-sources/abis/Contract.abi b/tests/integration-tests/file-data-sources/abis/Contract.abi deleted file mode 100644 index 1e3ec7217af..00000000000 --- a/tests/integration-tests/file-data-sources/abis/Contract.abi +++ /dev/null @@ -1 +0,0 @@ -[ ] diff --git a/tests/integration-tests/file-data-sources/schema.graphql b/tests/integration-tests/file-data-sources/schema.graphql deleted file mode 100644 index 476bce7976d..00000000000 --- a/tests/integration-tests/file-data-sources/schema.graphql +++ /dev/null @@ -1,4 +0,0 @@ -type IpfsFile @entity { - id: ID! - content: String! -} diff --git a/tests/integration-tests/file-data-sources/src/mapping.ts b/tests/integration-tests/file-data-sources/src/mapping.ts deleted file mode 100644 index 3431f17bb1a..00000000000 --- a/tests/integration-tests/file-data-sources/src/mapping.ts +++ /dev/null @@ -1,16 +0,0 @@ -import { ethereum, dataSource, BigInt, Bytes } from '@graphprotocol/graph-ts' -import { IpfsFile } from '../generated/schema' - -export function handleBlock(block: ethereum.Block): void { - if (block.number == BigInt.fromI32(0)) { - // CID QmVkvoPGi9jvvuxsHDVJDgzPEzagBaWSZRYoRDzU244HjZ is the file - // `file-data-sources/abis/Contract.abi` after being processed by graph-cli. 
- dataSource.create("File", ["QmVkvoPGi9jvvuxsHDVJDgzPEzagBaWSZRYoRDzU244HjZ"]) - } -} - -export function handleFile(data: Bytes): void { - let entity = new IpfsFile(dataSource.address().toHexString()) - entity.content = data.toString() - entity.save() -} diff --git a/tests/integration-tests/ganache-reverts/package.json b/tests/integration-tests/ganache-reverts/package.json index dc07d7f2815..fdc5f9119d5 100644 --- a/tests/integration-tests/ganache-reverts/package.json +++ b/tests/integration-tests/ganache-reverts/package.json @@ -2,8 +2,8 @@ "name": "ganache-reverts", "version": "0.1.0", "scripts": { - "build-contracts": "../common/build-contracts.sh", - "codegen": "graph codegen", + "build-contracts": "../../common/build-contracts.sh", + "codegen": "graph codegen --skip-migrations", "test": "yarn build-contracts && truffle test --compile-none --network test", "create:test": "graph create test/ganache-reverts --node $GRAPH_NODE_ADMIN_URI", "deploy:test": "graph deploy test/ganache-reverts --version-label v0.0.1 --ipfs $IPFS_URI --node $GRAPH_NODE_ADMIN_URI" diff --git a/tests/integration-tests/host-exports/package.json b/tests/integration-tests/host-exports/package.json index 082ebe0e345..038d6f2cac2 100644 --- a/tests/integration-tests/host-exports/package.json +++ b/tests/integration-tests/host-exports/package.json @@ -2,8 +2,8 @@ "name": "host-exports", "version": "0.1.0", "scripts": { - "build-contracts": "../common/build-contracts.sh", - "codegen": "graph codegen", + "build-contracts": "../../common/build-contracts.sh", + "codegen": "graph codegen --skip-migrations", "test": "yarn build-contracts && truffle test --compile-none --network test", "create:test": "graph create test/host-exports --node $GRAPH_NODE_ADMIN_URI", "deploy:test": "graph deploy test/host-exports --version-label v0.0.1 --ipfs $IPFS_URI --node $GRAPH_NODE_ADMIN_URI" diff --git a/tests/integration-tests/host-exports/truffle.js b/tests/integration-tests/host-exports/truffle.js index f8596221165..e3957ea52e8 100644 --- a/tests/integration-tests/host-exports/truffle.js +++ b/tests/integration-tests/host-exports/truffle.js @@ -2,8 +2,8 @@ require("babel-register"); require("babel-polyfill"); module.exports = { - contracts_directory: "../common", - migrations_directory: "../common", + contracts_directory: "../../common", + migrations_directory: "../../common", contracts_build_directory: "./truffle_output", networks: { test: { diff --git a/tests/integration-tests/non-fatal-errors/package.json b/tests/integration-tests/non-fatal-errors/package.json index 02278477cd5..162a4dbd213 100644 --- a/tests/integration-tests/non-fatal-errors/package.json +++ b/tests/integration-tests/non-fatal-errors/package.json @@ -2,8 +2,8 @@ "name": "non-fatal-errors", "version": "0.1.0", "scripts": { - "build-contracts": "../common/build-contracts.sh", - "codegen": "graph codegen", + "build-contracts": "../../common/build-contracts.sh", + "codegen": "graph codegen --skip-migrations", "test": "yarn build-contracts && truffle test --compile-none --network test", "create:test": "graph create test/non-fatal-errors --node $GRAPH_NODE_ADMIN_URI", "deploy:test": "graph deploy test/non-fatal-errors --version-label v0.0.1 --ipfs $IPFS_URI --node $GRAPH_NODE_ADMIN_URI" diff --git a/tests/integration-tests/non-fatal-errors/truffle.js b/tests/integration-tests/non-fatal-errors/truffle.js index f8596221165..e3957ea52e8 100644 --- a/tests/integration-tests/non-fatal-errors/truffle.js +++ b/tests/integration-tests/non-fatal-errors/truffle.js @@ -2,8 +2,8 @@ 
require("babel-register"); require("babel-polyfill"); module.exports = { - contracts_directory: "../common", - migrations_directory: "../common", + contracts_directory: "../../common", + migrations_directory: "../../common", contracts_build_directory: "./truffle_output", networks: { test: { diff --git a/tests/integration-tests/overloaded-contract-functions/package.json b/tests/integration-tests/overloaded-contract-functions/package.json index e43c3f23a0c..ac6c59b16db 100644 --- a/tests/integration-tests/overloaded-contract-functions/package.json +++ b/tests/integration-tests/overloaded-contract-functions/package.json @@ -2,8 +2,8 @@ "name": "overloaded-contract-functions", "version": "0.1.0", "scripts": { - "build-contracts": "../common/build-contracts.sh", - "codegen": "graph codegen", + "build-contracts": "../../common/build-contracts.sh", + "codegen": "graph codegen --skip-migrations", "test": "yarn build-contracts && truffle test --compile-none --network test", "create:test": "graph create test/overloaded-contract-functions --node $GRAPH_NODE_ADMIN_URI", "deploy:test": "graph deploy test/overloaded-contract-functions --version-label v0.0.1 --ipfs $IPFS_URI --node $GRAPH_NODE_ADMIN_URI" diff --git a/tests/integration-tests/package.json b/tests/integration-tests/package.json index 845bd090977..1d272de326c 100644 --- a/tests/integration-tests/package.json +++ b/tests/integration-tests/package.json @@ -2,16 +2,12 @@ "private": true, "workspaces": [ "api-version-v0-0-4", - "data-source-revert", - "fatal-error", "ganache-reverts", "host-exports", "non-fatal-errors", "overloaded-contract-functions", "poi-for-failed-subgraph", "remove-then-update", - "typename", - "value-roundtrip", - "file-data-sources" + "value-roundtrip" ] -} \ No newline at end of file +} diff --git a/tests/integration-tests/poi-for-failed-subgraph/package.json b/tests/integration-tests/poi-for-failed-subgraph/package.json index 70c4ddbe625..665bf0ace52 100644 --- a/tests/integration-tests/poi-for-failed-subgraph/package.json +++ b/tests/integration-tests/poi-for-failed-subgraph/package.json @@ -2,8 +2,8 @@ "name": "poi-for-failed-subgraph", "version": "0.1.0", "scripts": { - "build-contracts": "../common/build-contracts.sh", - "codegen": "graph codegen", + "build-contracts": "../../common/build-contracts.sh", + "codegen": "graph codegen --skip-migrations", "test": "yarn build-contracts && truffle test --compile-none --network test", "create:test": "graph create test/poi-for-failed-subgraph --node $GRAPH_NODE_ADMIN_URI", "deploy:test": "graph deploy test/poi-for-failed-subgraph --version-label v0.0.1 --ipfs $IPFS_URI --node $GRAPH_NODE_ADMIN_URI" diff --git a/tests/integration-tests/poi-for-failed-subgraph/truffle.js b/tests/integration-tests/poi-for-failed-subgraph/truffle.js index 55e43ccf6a4..58130e7d21d 100644 --- a/tests/integration-tests/poi-for-failed-subgraph/truffle.js +++ b/tests/integration-tests/poi-for-failed-subgraph/truffle.js @@ -2,8 +2,8 @@ require("babel-register"); require("babel-polyfill"); module.exports = { - contracts_directory: "../common", - migrations_directory: "../common", + contracts_directory: "../../common", + migrations_directory: "../../common", contracts_build_directory: "./truffle_output", networks: { test: { diff --git a/tests/integration-tests/remove-then-update/package.json b/tests/integration-tests/remove-then-update/package.json index 2547745cd56..91eea8a5417 100644 --- a/tests/integration-tests/remove-then-update/package.json +++ 
b/tests/integration-tests/remove-then-update/package.json @@ -2,8 +2,8 @@ "name": "remove-then-update", "version": "0.1.0", "scripts": { - "build-contracts": "../common/build-contracts.sh", - "codegen": "graph codegen", + "build-contracts": "../../common/build-contracts.sh", + "codegen": "graph codegen --skip-migrations", "test": "yarn build-contracts && truffle test --compile-none --network test", "create:test": "graph create test/remove-then-update --node $GRAPH_NODE_ADMIN_URI", "deploy:test": "graph deploy test/remove-then-update --version-label v0.0.1 --ipfs $IPFS_URI --node $GRAPH_NODE_ADMIN_URI" diff --git a/tests/integration-tests/remove-then-update/truffle.js b/tests/integration-tests/remove-then-update/truffle.js index f8596221165..e3957ea52e8 100644 --- a/tests/integration-tests/remove-then-update/truffle.js +++ b/tests/integration-tests/remove-then-update/truffle.js @@ -2,8 +2,8 @@ require("babel-register"); require("babel-polyfill"); module.exports = { - contracts_directory: "../common", - migrations_directory: "../common", + contracts_directory: "../../common", + migrations_directory: "../../common", contracts_build_directory: "./truffle_output", networks: { test: { diff --git a/tests/integration-tests/value-roundtrip/package.json b/tests/integration-tests/value-roundtrip/package.json index b8d2f4b6bf0..cf177d5c862 100644 --- a/tests/integration-tests/value-roundtrip/package.json +++ b/tests/integration-tests/value-roundtrip/package.json @@ -2,8 +2,8 @@ "name": "value-roundtrip", "version": "0.1.0", "scripts": { - "build-contracts": "../common/build-contracts.sh", - "codegen": "graph codegen", + "build-contracts": "../../common/build-contracts.sh", + "codegen": "graph codegen --skip-migrations", "test": "yarn build-contracts && truffle test --compile-none --network test", "create:test": "graph create test/value-roundtrip --node $GRAPH_NODE_ADMIN_URI", "deploy:test": "graph deploy test/value-roundtrip --version-label v0.0.1 --ipfs $IPFS_URI --node $GRAPH_NODE_ADMIN_URI" diff --git a/tests/integration-tests/value-roundtrip/truffle.js b/tests/integration-tests/value-roundtrip/truffle.js index 83606ff75a1..27a9675b4d7 100644 --- a/tests/integration-tests/value-roundtrip/truffle.js +++ b/tests/integration-tests/value-roundtrip/truffle.js @@ -2,8 +2,8 @@ require("babel-register"); require("babel-polyfill"); module.exports = { - contracts_directory: "../common", - migrations_directory: "../common", + contracts_directory: "../../common", + migrations_directory: "../../common", contracts_build_directory: "./truffle_output", networks: { test: { diff --git a/tests/integration-tests/yarn.lock b/tests/integration-tests/yarn.lock index f1b8e0c67d9..bf8ff17f87e 100644 --- a/tests/integration-tests/yarn.lock +++ b/tests/integration-tests/yarn.lock @@ -917,8 +917,8 @@ yaml "^1.5.1" "@graphprotocol/graph-ts@https://github.com/graphprotocol/graph-ts#main": - version "0.27.0" - resolved "https://github.com/graphprotocol/graph-ts#55545f3d39b1c6b78e7d96aa83cc2692b8e2ae33" + version "0.28.1" + resolved "https://github.com/graphprotocol/graph-ts#4e91d2c0b695c7689aba205516d3e80fb5588454" dependencies: assemblyscript "0.19.10" @@ -2033,13 +2033,13 @@ abstract-leveldown@~6.2.1: level-supports "~1.0.0" xtend "~4.0.0" -accepts@^1.3.5, accepts@~1.3.7: - version "1.3.7" - resolved "https://registry.yarnpkg.com/accepts/-/accepts-1.3.7.tgz#531bc726517a3b2b41f850021c6cc15eaab507cd" - integrity sha512-Il80Qs2WjYlJIBNzNkK6KYqlVMTbZLXgHx2oT0pU/fjRHyEp+PEfEPY0R3WCwAGVOtauxh1hOxNgIf5bv7dQpA== +accepts@^1.3.5, 
accepts@~1.3.8: + version "1.3.8" + resolved "https://registry.yarnpkg.com/accepts/-/accepts-1.3.8.tgz#0bf0be125b67014adcb0b0921e62db7bffe16b2e" + integrity sha512-PYAthTa2m2VKxuvSD3DPC/Gy+U+sOA1LAuT8mkmRuvw+NACSaeXEQ+NHcVF7rONl6qcaxV3Uuemwawk+7+SJLw== dependencies: - mime-types "~2.1.24" - negotiator "0.6.2" + mime-types "~2.1.34" + negotiator "0.6.3" acorn-globals@^1.0.4: version "1.0.9" @@ -2395,7 +2395,7 @@ array-filter@^1.0.0: array-flatten@1.1.1: version "1.1.1" resolved "https://registry.yarnpkg.com/array-flatten/-/array-flatten-1.1.1.tgz#9a5f699051b1e7073328f2a008968b64ea2955d2" - integrity sha1-ml9pkFGx5wczKPKgCJaLZOopVdI= + integrity sha512-PCVAQswWemu6UdxsDFFX/+gVeYqKAod3D3UVm91jHwynguOwAvYPhx8nNlM++NqRcK6CxxpUafjmhIdKiHibqg== array-union@^2.1.0: version "2.1.0" @@ -2923,21 +2923,23 @@ bn.js@^5.1.2, bn.js@^5.2.0, bn.js@^5.2.1: resolved "https://registry.yarnpkg.com/bn.js/-/bn.js-5.2.1.tgz#0bc527a6a0d18d0aa8d5b0538ce4a77dccfa7b70" integrity sha512-eXRvHzWyYPBuB4NBy0cmYQjGitUrtqwbvlzP3G6VFnNRbsZQIxQ10PbKKHt8gZ/HW/D/747aDl+QkDqg3KQLMQ== -body-parser@1.19.0, body-parser@^1.16.0, body-parser@^1.18.3: - version "1.19.0" - resolved "https://registry.yarnpkg.com/body-parser/-/body-parser-1.19.0.tgz#96b2709e57c9c4e09a6fd66a8fd979844f69f08a" - integrity sha512-dhEPs72UPbDnAQJ9ZKMNTP6ptJaionhP5cBb541nXPlW60Jepo9RV/a4fX4XWW9CuFNK22krhrj1+rgzifNCsw== +body-parser@1.20.1, body-parser@^1.16.0, body-parser@^1.18.3: + version "1.20.1" + resolved "https://registry.yarnpkg.com/body-parser/-/body-parser-1.20.1.tgz#b1812a8912c195cd371a3ee5e66faa2338a5c668" + integrity sha512-jWi7abTbYwajOytWCQc37VulmWiRae5RyTpaCyDcS5/lMdtwSz5lOpDE67srw/HYe35f1z3fDQw+3txg7gNtWw== dependencies: - bytes "3.1.0" + bytes "3.1.2" content-type "~1.0.4" debug "2.6.9" - depd "~1.1.2" - http-errors "1.7.2" + depd "2.0.0" + destroy "1.2.0" + http-errors "2.0.0" iconv-lite "0.4.24" - on-finished "~2.3.0" - qs "6.7.0" - raw-body "2.4.0" - type-is "~1.6.17" + on-finished "2.4.1" + qs "6.11.0" + raw-body "2.5.1" + type-is "~1.6.18" + unpipe "1.0.0" boolbase@^1.0.0, boolbase@~1.0.0: version "1.0.0" @@ -3162,10 +3164,10 @@ busboy@^0.3.1: dependencies: dicer "0.3.0" -bytes@3.1.0: - version "3.1.0" - resolved "https://registry.yarnpkg.com/bytes/-/bytes-3.1.0.tgz#f6cf7933a360e0588fa9fde85651cdc7f805d1f6" - integrity sha512-zauLjrfCG+xvoyaqLoV8bLVXXNGC4JqlxFCutSDWA6fJrTo2ZuvLYTqZ7aHBLZSMOopbzwv8f+wZcVzfVTI2Dg== +bytes@3.1.2: + version "3.1.2" + resolved "https://registry.yarnpkg.com/bytes/-/bytes-3.1.2.tgz#8b0beeb98605adf1b128fa4386403c009e0221a5" + integrity sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg== cacheable-request@^6.0.0: version "6.1.0" @@ -3676,12 +3678,12 @@ constant-case@^2.0.0: snake-case "^2.1.0" upper-case "^1.1.1" -content-disposition@0.5.3: - version "0.5.3" - resolved "https://registry.yarnpkg.com/content-disposition/-/content-disposition-0.5.3.tgz#e130caf7e7279087c5616c2007d0485698984fbd" - integrity sha512-ExO0774ikEObIAEV9kDo50o+79VCUdEB6n6lzKgGwupcVeRlhrj3qGAfwq8G6uBJjkqLrhT0qEYFcWng8z1z0g== +content-disposition@0.5.4: + version "0.5.4" + resolved "https://registry.yarnpkg.com/content-disposition/-/content-disposition-0.5.4.tgz#8b82b4efac82512a02bb0b1dcec9d2c5e8eb5bfe" + integrity sha512-FveZTNuGw04cxlAiWbzi6zTAL/lhehaWbTtgluJh4/E95DqMwTmha3KZN1aAWA8cFIhHzMZUvLevkw5Rqk+tSQ== dependencies: - safe-buffer "5.1.2" + safe-buffer "5.2.1" content-hash@^2.5.2: version "2.5.2" @@ -3707,12 +3709,12 @@ convert-source-map@1.X, convert-source-map@^1.5.1, 
convert-source-map@^1.7.0: cookie-signature@1.0.6: version "1.0.6" resolved "https://registry.yarnpkg.com/cookie-signature/-/cookie-signature-1.0.6.tgz#e303a882b342cc3ee8ca513a79999734dab3ae2c" - integrity sha1-4wOogrNCzD7oylE6eZmXNNqzriw= + integrity sha512-QADzlaHc8icV8I7vbaJXJwod9HWYp8uCqf1xa4OfNu1T7JVxQIrUgOWtHdNDtPiywmFbiS12VjotIXLrKM3orQ== -cookie@0.4.0: - version "0.4.0" - resolved "https://registry.yarnpkg.com/cookie/-/cookie-0.4.0.tgz#beb437e7022b3b6d49019d088665303ebe9c14ba" - integrity sha512-+Hp8fLp57wnUSt0tY0tHEXh4voZRDnoIrZPqlo3DPiI4y9lwg/jqx+1Om94/W6ZaPDOUbnjOt/99w66zk+l1Xg== +cookie@0.5.0: + version "0.5.0" + resolved "https://registry.yarnpkg.com/cookie/-/cookie-0.5.0.tgz#d1f5d71adec6558c58f389987c366aa47e994f8b" + integrity sha512-YZ3GUyn/o8gfKJlnlX7g7xq4gyO6OSuhGPKaaGssGB2qgDUS0gPgtTvoyZLTt9Ab6dC4hfc9dV5arkvc/OCmrw== cookiejar@^2.1.1: version "2.1.2" @@ -3964,7 +3966,7 @@ debug@4.1.1: dependencies: ms "^2.1.1" -debug@4.3.1, debug@^4.1.1, debug@^4.3.1: +debug@4.3.1: version "4.3.1" resolved "https://registry.yarnpkg.com/debug/-/debug-4.3.1.tgz#f0d229c505e0c6d8c49ac553d1b13dc183f6b2ee" integrity sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ== @@ -3978,7 +3980,7 @@ debug@^3.1.0, debug@^3.2.6: dependencies: ms "^2.1.1" -debug@^4.1.0: +debug@^4.1.0, debug@^4.1.1, debug@^4.3.1: version "4.3.4" resolved "https://registry.yarnpkg.com/debug/-/debug-4.3.4.tgz#1319f6579357f2338d3337d2cdd4914bb5dcc865" integrity sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ== @@ -4074,6 +4076,11 @@ delimit-stream@0.1.0: resolved "https://registry.yarnpkg.com/delimit-stream/-/delimit-stream-0.1.0.tgz#9b8319477c0e5f8aeb3ce357ae305fc25ea1cd2b" integrity sha512-a02fiQ7poS5CnjiJBAsjGLPp5EwVoGHNeu9sziBd9huppRfsAFIpv5zNLv0V1gbop53ilngAf5Kf331AwcoRBQ== +depd@2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/depd/-/depd-2.0.0.tgz#b696163cc757560d09cf22cc8fad1571b79e76df" + integrity sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw== + depd@~1.1.2: version "1.1.2" resolved "https://registry.yarnpkg.com/depd/-/depd-1.1.2.tgz#9bcd52e14c097763e749b274c4346ed2e560b5a9" @@ -4092,10 +4099,10 @@ des.js@^1.0.0: inherits "^2.0.1" minimalistic-assert "^1.0.0" -destroy@~1.0.4: - version "1.0.4" - resolved "https://registry.yarnpkg.com/destroy/-/destroy-1.0.4.tgz#978857442c44749e4206613e37946205826abd80" - integrity sha1-l4hXRCxEdJ5CBmE+N5RiBYJqvYA= +destroy@1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/destroy/-/destroy-1.2.0.tgz#4803735509ad8be552934c67df614f94e66fa015" + integrity sha512-2sJGJTaXIIaR1w4iJSNoN0hnMY7Gpc/n8D4qSCJw8QqFWXf7cuAgnEHxBpweaVcPevC2l3KpjYCx3NypQQgaJg== detect-indent@^4.0.0: version "4.0.0" @@ -4321,7 +4328,7 @@ ecc-jsbn@~0.1.1: ee-first@1.1.1: version "1.1.1" resolved "https://registry.yarnpkg.com/ee-first/-/ee-first-1.1.1.tgz#590c61156b0ae2f4f0255732a158b266bc56b21d" - integrity sha1-WQxhFWsK4vTwJVcyoViyZrxWsh0= + integrity sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow== ejs@^2.6.1: version "2.7.4" @@ -4377,7 +4384,7 @@ emoji-regex@^8.0.0: encodeurl@~1.0.2: version "1.0.2" resolved "https://registry.yarnpkg.com/encodeurl/-/encodeurl-1.0.2.tgz#ad3ff4c86ec2d029322f5a02c3a9a606c95b3f59" - integrity sha1-rT/0yG7C0CkyL1oCw6mmBslbP1k= + integrity sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w== encoding-down@^6.3.0: version "6.3.0" 
@@ -4647,7 +4654,7 @@ esutils@^2.0.2: etag@~1.8.1: version "1.8.1" resolved "https://registry.yarnpkg.com/etag/-/etag-1.8.1.tgz#41ae2eeb65efa62268aebfea83ac7d79299b0887" - integrity sha1-Qa4u62XvpiJorr/qg6x9eSmbCIc= + integrity sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg== eth-block-tracker@^4.4.2: version "4.4.3" @@ -5030,37 +5037,38 @@ explain-error@^1.0.4: integrity sha512-/wSgNMxFusiYRy1rd19LT2SQlIXDppHpumpWo06wxjflD1OYxDLbl6rMVw+U3bxD5Nuhex4TKqv9Aem4D0lVzQ== express@^4.0.0, express@^4.14.0, express@^4.17.1: - version "4.17.1" - resolved "https://registry.yarnpkg.com/express/-/express-4.17.1.tgz#4491fc38605cf51f8629d39c2b5d026f98a4c134" - integrity sha512-mHJ9O79RqluphRrcw2X/GTh3k9tVv8YcoyY4Kkh4WDMUYKRZUq0h1o0w2rrrxBqM7VoeUVqgb27xlEMXTnYt4g== + version "4.18.2" + resolved "https://registry.yarnpkg.com/express/-/express-4.18.2.tgz#3fabe08296e930c796c19e3c516979386ba9fd59" + integrity sha512-5/PsL6iGPdfQ/lKM1UuielYgv3BUoJfz1aUwU9vHZ+J7gyvwdQXFEBIEIaxeGf0GIcreATNyBExtalisDbuMqQ== dependencies: - accepts "~1.3.7" + accepts "~1.3.8" array-flatten "1.1.1" - body-parser "1.19.0" - content-disposition "0.5.3" + body-parser "1.20.1" + content-disposition "0.5.4" content-type "~1.0.4" - cookie "0.4.0" + cookie "0.5.0" cookie-signature "1.0.6" debug "2.6.9" - depd "~1.1.2" + depd "2.0.0" encodeurl "~1.0.2" escape-html "~1.0.3" etag "~1.8.1" - finalhandler "~1.1.2" + finalhandler "1.2.0" fresh "0.5.2" + http-errors "2.0.0" merge-descriptors "1.0.1" methods "~1.1.2" - on-finished "~2.3.0" + on-finished "2.4.1" parseurl "~1.3.3" path-to-regexp "0.1.7" - proxy-addr "~2.0.5" - qs "6.7.0" + proxy-addr "~2.0.7" + qs "6.11.0" range-parser "~1.2.1" - safe-buffer "5.1.2" - send "0.17.1" - serve-static "1.14.1" - setprototypeof "1.1.1" - statuses "~1.5.0" + safe-buffer "5.2.1" + send "0.18.0" + serve-static "1.15.0" + setprototypeof "1.2.0" + statuses "2.0.1" type-is "~1.6.18" utils-merge "1.0.1" vary "~1.1.2" @@ -5249,17 +5257,17 @@ fill-range@^7.0.1: dependencies: to-regex-range "^5.0.1" -finalhandler@~1.1.2: - version "1.1.2" - resolved "https://registry.yarnpkg.com/finalhandler/-/finalhandler-1.1.2.tgz#b7e7d000ffd11938d0fdb053506f6ebabe9f587d" - integrity sha512-aAWcW57uxVNrQZqFXjITpW3sIUQmHGG3qSb9mUah9MgMC4NeWhNOlNjXEYq3HjRAvL6arUviZGGJsBg6z0zsWA== +finalhandler@1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/finalhandler/-/finalhandler-1.2.0.tgz#7d23fe5731b207b4640e4fcd00aec1f9207a7b32" + integrity sha512-5uXcUVftlQMFnWC9qu/svkWv3GTd2PfUhK/3PLkYNAe7FbqJMt3515HaxE6eRL74GdsriiwujiawdaB1BpEISg== dependencies: debug "2.6.9" encodeurl "~1.0.2" escape-html "~1.0.3" - on-finished "~2.3.0" + on-finished "2.4.1" parseurl "~1.3.3" - statuses "~1.5.0" + statuses "2.0.1" unpipe "~1.0.0" find-up@5.0.0: @@ -5400,7 +5408,7 @@ forwarded@0.2.0: fresh@0.5.2: version "0.5.2" resolved "https://registry.yarnpkg.com/fresh/-/fresh-0.5.2.tgz#3d8cadd90d976569fa835ab1f8e4b23a105605a7" - integrity sha1-PYyt2Q2XZWn6g1qx+OSyOhBWBac= + integrity sha512-zJ2mQYM18rEFOudeV4GShTGIQ7RbzA7ozbU9I/XBpm7kqgMywgmylMwXHxZJmkVoYkna9d2pVXVXPdYTP9ej8Q== fs-capacitor@^2.0.4: version "2.0.4" @@ -5538,9 +5546,9 @@ get-caller-file@^2.0.1: integrity sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg== get-intrinsic@^1.0.2: - version "1.1.2" - resolved "https://registry.yarnpkg.com/get-intrinsic/-/get-intrinsic-1.1.2.tgz#336975123e05ad0b7ba41f152ee4aadbea6cf598" - integrity 
sha512-Jfm3OyCxHh9DJyc28qGk+JmfkpO41A4XkneDSujN9MDXrm4oDKdHvndhZ2dN94+ERNfkYJWDclW6k2L/ZGHjXA== + version "1.1.3" + resolved "https://registry.yarnpkg.com/get-intrinsic/-/get-intrinsic-1.1.3.tgz#063c84329ad93e83893c7f4f243ef63ffa351385" + integrity sha512-QJVz1Tj7MS099PevUG5jvnt9tSkXN8K14dxQlikJuPt4uD9hHAHjLyLBiLR5zELelBdD9QNRAXZzsJx0WaDL9A== dependencies: function-bind "^1.1.1" has "^1.0.3" @@ -6147,16 +6155,16 @@ http-cache-semantics@^4.0.0: resolved "https://registry.yarnpkg.com/http-cache-semantics/-/http-cache-semantics-4.1.0.tgz#49e91c5cbf36c9b94bcfcd71c23d5249ec74e390" integrity sha512-carPklcUh7ROWRK7Cv27RPtdhYhUsela/ue5/jKzjegVvXDqM2ILE9Q2BGn9JZJh1g87cp56su/FgQSzcWS8cQ== -http-errors@1.7.2: - version "1.7.2" - resolved "https://registry.yarnpkg.com/http-errors/-/http-errors-1.7.2.tgz#4f5029cf13239f31036e5b2e55292bcfbcc85c8f" - integrity sha512-uUQBt3H/cSIVfch6i1EuPNy/YsRSOUBXTVfZ+yR7Zjez3qjBz6i9+i4zjNaoqcoFVI4lQJ5plg63TvGfRSDCRg== +http-errors@2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/http-errors/-/http-errors-2.0.0.tgz#b7774a1486ef73cf7667ac9ae0858c012c57b9d3" + integrity sha512-FtwrG/euBzaEjYeRqOgly7G0qviiXoJWnvEH2Z1plBdXgbyjv34pHTSb9zoeHMyDy33+DWy5Wt9Wo+TURtOYSQ== dependencies: - depd "~1.1.2" - inherits "2.0.3" - setprototypeof "1.1.1" - statuses ">= 1.5.0 < 2" - toidentifier "1.0.0" + depd "2.0.0" + inherits "2.0.4" + setprototypeof "1.2.0" + statuses "2.0.1" + toidentifier "1.0.1" http-errors@^1.7.3: version "1.8.0" @@ -6169,17 +6177,6 @@ http-errors@^1.7.3: statuses ">= 1.5.0 < 2" toidentifier "1.0.0" -http-errors@~1.7.2: - version "1.7.3" - resolved "https://registry.yarnpkg.com/http-errors/-/http-errors-1.7.3.tgz#6c619e4f9c60308c38519498c14fbb10aacebb06" - integrity sha512-ZTTX0MWrsQ2ZAhA1cejAwDLycFsd7I7nVtnkT3Ol0aqodaKW+0CTZDQ1uBv5whptCnc8e8HeRRJxRs0kmm/Qfw== - dependencies: - depd "~1.1.2" - inherits "2.0.4" - setprototypeof "1.1.1" - statuses ">= 1.5.0 < 2" - toidentifier "1.0.0" - http-https@^1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/http-https/-/http-https-1.0.0.tgz#2f908dd5f1db4068c058cd6e6d4ce392c913389b" @@ -7875,7 +7872,7 @@ md5.js@^1.3.4: media-typer@0.3.0: version "0.3.0" resolved "https://registry.yarnpkg.com/media-typer/-/media-typer-0.3.0.tgz#8710d7af0aa626f8fffa1ce00168545263255748" - integrity sha1-hxDXrwqmJvj/+hzgAWhUUmMlV0g= + integrity sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ== memdown@1.4.1, memdown@^1.0.0: version "1.4.1" @@ -7897,7 +7894,7 @@ memorystream@^0.3.1: merge-descriptors@1.0.1: version "1.0.1" resolved "https://registry.yarnpkg.com/merge-descriptors/-/merge-descriptors-1.0.1.tgz#b00aaa556dd8b44568150ec9d1b953f3f90cbb61" - integrity sha1-sAqqVW3YtEVoFQ7J0blT8/kMu2E= + integrity sha512-cCi6g3/Zr1iqQi6ySbseM1Xvooa98N0w31jzUYrXPX2xqObmFGHJ0tQ5u74H3mVh7wLouTseZyYIq39g8cNp1w== merge-stream@^1.0.0: version "1.0.1" @@ -7933,7 +7930,7 @@ merkle-patricia-tree@^2.1.2, merkle-patricia-tree@^2.3.2: methods@~1.1.2: version "1.1.2" resolved "https://registry.yarnpkg.com/methods/-/methods-1.1.2.tgz#5529a4d67654134edcc5266656835b0f851afcee" - integrity sha1-VSmk1nZUE07cxSZmVoNbD4Ua/O4= + integrity sha512-iclAHeNqNm68zFtnZ0e+1L2yUIdvzNoauKU4WBA3VvH/vPFieF7qfRlwUZU+DA9P9bPXIS90ulxoUoCH23sV2w== micromatch@^2.3.7: version "2.3.11" @@ -7975,17 +7972,12 @@ mime-db@1.46.0: resolved "https://registry.yarnpkg.com/mime-db/-/mime-db-1.46.0.tgz#6267748a7f799594de3cbc8cde91def349661cee" integrity 
sha512-svXaP8UQRZ5K7or+ZmfNhg2xX3yKDMUzqadsSqi4NCH/KomcH75MAMYAGVlvXn4+b/xOPhS3I2uHKRUzvjY7BQ== -mime-db@1.50.0: - version "1.50.0" - resolved "https://registry.yarnpkg.com/mime-db/-/mime-db-1.50.0.tgz#abd4ac94e98d3c0e185016c67ab45d5fde40c11f" - integrity sha512-9tMZCDlYHqeERXEHO9f/hKfNXhre5dK2eE/krIvUjZbS2KPcqGDfNShIWS1uW9XOTKQKqK6qbeOci18rbfW77A== - mime-db@1.52.0: version "1.52.0" resolved "https://registry.yarnpkg.com/mime-db/-/mime-db-1.52.0.tgz#bbabcdc02859f4987301c856e3387ce5ec43bf70" integrity sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg== -mime-types@^2.1.12, mime-types@~2.1.19: +mime-types@^2.1.12, mime-types@~2.1.19, mime-types@~2.1.24, mime-types@~2.1.34: version "2.1.35" resolved "https://registry.yarnpkg.com/mime-types/-/mime-types-2.1.35.tgz#381a871b62a734450660ae3deee44813f70d959a" integrity sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw== @@ -7999,13 +7991,6 @@ mime-types@^2.1.16: dependencies: mime-db "1.46.0" -mime-types@~2.1.24: - version "2.1.33" - resolved "https://registry.yarnpkg.com/mime-types/-/mime-types-2.1.33.tgz#1fa12a904472fafd068e48d9e8401f74d3f70edb" - integrity sha512-plLElXp7pRDd0bNZHw+nMd52vRYjLwQjygaNg7ddJ2uJtTlmnTCjWuPKxVu6//AdaRuME84SvLW91sIkBqGT0g== - dependencies: - mime-db "1.50.0" - mime@1.6.0: version "1.6.0" resolved "https://registry.yarnpkg.com/mime/-/mime-1.6.0.tgz#32cd9e5c64553bd58d19a568af452acff04981b1" @@ -8192,19 +8177,14 @@ module@^1.2.5: ms@2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/ms/-/ms-2.0.0.tgz#5608aeadfc00be6c2901df5f9861788de0d597c8" - integrity sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g= - -ms@2.1.1: - version "2.1.1" - resolved "https://registry.yarnpkg.com/ms/-/ms-2.1.1.tgz#30a5864eb3ebb0a66f2ebe6d727af06a09d86e0a" - integrity sha512-tgp+dl5cGk28utYktBsrFqA7HKgrhgPsg6Z/EfhWI4gl1Hwq8B/GmY/0oXZ6nF8hDVesS/FpnYaD/kOWhYQvyg== + integrity sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A== ms@2.1.2: version "2.1.2" resolved "https://registry.yarnpkg.com/ms/-/ms-2.1.2.tgz#d09d1f357b443f493382a8eb3ccd183872ae6009" integrity sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w== -ms@^2.1.1: +ms@2.1.3, ms@^2.1.1: version "2.1.3" resolved "https://registry.yarnpkg.com/ms/-/ms-2.1.3.tgz#574c8138ce1d2b5861f0b44579dbadd60c6615b2" integrity sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA== @@ -8399,10 +8379,10 @@ needle@^2.2.1: iconv-lite "^0.4.4" sax "^1.2.4" -negotiator@0.6.2: - version "0.6.2" - resolved "https://registry.yarnpkg.com/negotiator/-/negotiator-0.6.2.tgz#feacf7ccf525a77ae9634436a64883ffeca346fb" - integrity sha512-hZXc7K2e+PgeI1eDBe/10Ard4ekbfrrqG8Ep+8Jmf4JID2bNg7NvCPOZN+kfF574pFQI7mum2AUqDidoKqcTOw== +negotiator@0.6.3: + version "0.6.3" + resolved "https://registry.yarnpkg.com/negotiator/-/negotiator-0.6.3.tgz#58e323a72fedc0d6f9cd4d31fe49f51479590ccd" + integrity sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg== next-tick@~1.0.0: version "1.0.0" @@ -8748,10 +8728,10 @@ oboe@2.1.5: dependencies: http-https "^1.0.0" -on-finished@~2.3.0: - version "2.3.0" - resolved "https://registry.yarnpkg.com/on-finished/-/on-finished-2.3.0.tgz#20f1336481b083cd75337992a16971aa2d906947" - integrity sha1-IPEzZIGwg811M3mSoWlxqi2QaUc= +on-finished@2.4.1: + version "2.4.1" + resolved 
"https://registry.yarnpkg.com/on-finished/-/on-finished-2.4.1.tgz#58c8c44116e54845ad57f14ab10b03533184ac3f" + integrity sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg== dependencies: ee-first "1.1.1" @@ -9124,7 +9104,7 @@ path-parse@^1.0.6: path-to-regexp@0.1.7: version "0.1.7" resolved "https://registry.yarnpkg.com/path-to-regexp/-/path-to-regexp-0.1.7.tgz#df604178005f522f15eb4490e7247a1bfaa67f8c" - integrity sha1-32BBeABfUi8V60SQ5yR6G/qmf4w= + integrity sha512-5DFkuoqlv1uYQKxy8omFBeJPQcdoE07Kv2sferDCrAq1ohOU+MSDswDIbnx3YAM60qIOnYa53wBhXW0EbMonrQ== path-type@^1.0.0: version "1.1.0" @@ -9642,7 +9622,7 @@ protons@^1.0.1: signed-varint "^2.0.1" varint "^5.0.0" -proxy-addr@~2.0.5: +proxy-addr@~2.0.7: version "2.0.7" resolved "https://registry.yarnpkg.com/proxy-addr/-/proxy-addr-2.0.7.tgz#f19fe69ceab311eeb94b42e70e8c2070f9ba1025" integrity sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg== @@ -9725,12 +9705,7 @@ pure-rand@^4.1.1: resolved "https://registry.yarnpkg.com/pure-rand/-/pure-rand-4.1.2.tgz#cbad2a3e3ea6df0a8d80d8ba204779b5679a5205" integrity sha512-uLzZpQWfroIqyFWmX/pl0OL2JHJdoU3dbh0dvZ25fChHFJJi56J5oQZhW6QgbT2Llwh1upki84LnTwlZvsungA== -qs@6.7.0: - version "6.7.0" - resolved "https://registry.yarnpkg.com/qs/-/qs-6.7.0.tgz#41dc1a015e3d581f1621776be31afb2876a9b1bc" - integrity sha512-VCdBRNFTX1fyE7Nb6FYoURo/SPe62QCaAyzJvUjwRaIsc+NePBEniHlvxFmmX56+HZphIGtV0XeCirBtpDrTyQ== - -qs@^6.4.0, qs@^6.5.2: +qs@6.11.0, qs@^6.4.0, qs@^6.5.2: version "6.11.0" resolved "https://registry.yarnpkg.com/qs/-/qs-6.11.0.tgz#fd0d963446f7a65e1367e01abd85429453f0c37a" integrity sha512-MvjoMCJwEarSbUYk5O+nmoSzSutSsTwF85zcHPQ9OrlFoZOYIjaqBAJIqIXjptyD5vThxGq52Xu/MaJzRkIk4Q== @@ -9817,13 +9792,13 @@ range-parser@~1.2.1: resolved "https://registry.yarnpkg.com/range-parser/-/range-parser-1.2.1.tgz#3cf37023d199e1c24d1a55b84800c2f3e6468031" integrity sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg== -raw-body@2.4.0: - version "2.4.0" - resolved "https://registry.yarnpkg.com/raw-body/-/raw-body-2.4.0.tgz#a1ce6fb9c9bc356ca52e89256ab59059e13d0332" - integrity sha512-4Oz8DUIwdvoa5qMJelxipzi/iJIi40O5cGV1wNYp5hvZP8ZN0T+jiNkL0QepXs+EsQ9XJ8ipEDoiH70ySUJP3Q== +raw-body@2.5.1: + version "2.5.1" + resolved "https://registry.yarnpkg.com/raw-body/-/raw-body-2.5.1.tgz#fe1b1628b181b700215e5fd42389f98b71392857" + integrity sha512-qqJBtEyVgS0ZmPGdCFPWJ3FreoqvG4MVQln/kCgF7Olq95IbOp0/BWyMwbdtn4VTvkM8Y7khCQ2Xgk/tcrCXig== dependencies: - bytes "3.1.0" - http-errors "1.7.2" + bytes "3.1.2" + http-errors "2.0.0" iconv-lite "0.4.24" unpipe "1.0.0" @@ -10292,16 +10267,16 @@ rustbn.js@~0.2.0: resolved "https://registry.yarnpkg.com/rustbn.js/-/rustbn.js-0.2.0.tgz#8082cb886e707155fd1cb6f23bd591ab8d55d0ca" integrity sha512-4VlvkRUuCJvr2J6Y0ImW7NvTCriMi7ErOAqWk1y69vAdoNIzCF3yPmgeNzx+RQTLEDFq5sHfscn1MwHxP9hNfA== -safe-buffer@5.1.2, safe-buffer@~5.1.0, safe-buffer@~5.1.1: - version "5.1.2" - resolved "https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.1.2.tgz#991ec69d296e0313747d59bdfd2b745c35f8828d" - integrity sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g== - -safe-buffer@^5.0.1, safe-buffer@^5.1.0, safe-buffer@^5.1.1, safe-buffer@^5.1.2, safe-buffer@^5.2.0, safe-buffer@^5.2.1, safe-buffer@~5.2.0: +safe-buffer@5.2.1, safe-buffer@^5.0.1, safe-buffer@^5.1.0, safe-buffer@^5.1.1, safe-buffer@^5.1.2, safe-buffer@^5.2.0, safe-buffer@^5.2.1, safe-buffer@~5.2.0: 
version "5.2.1" resolved "https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.2.1.tgz#1eaf9fa9bdb1fdd4ec75f58f9cdb4e6b7827eec6" integrity sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ== +safe-buffer@~5.1.0, safe-buffer@~5.1.1: + version "5.1.2" + resolved "https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.1.2.tgz#991ec69d296e0313747d59bdfd2b745c35f8828d" + integrity sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g== + safe-event-emitter@^1.0.1: version "1.0.1" resolved "https://registry.yarnpkg.com/safe-event-emitter/-/safe-event-emitter-1.0.1.tgz#5b692ef22329ed8f69fdce607e50ca734f6f20af" @@ -10415,24 +10390,24 @@ semver@~5.4.1: resolved "https://registry.yarnpkg.com/semver/-/semver-5.4.1.tgz#e059c09d8571f0540823733433505d3a2f00b18e" integrity sha512-WfG/X9+oATh81XtllIo/I8gOiY9EXRdv1cQdyykeXK17YcUW3EXUAi2To4pcH6nZtJPr7ZOpM5OMyWJZm+8Rsg== -send@0.17.1: - version "0.17.1" - resolved "https://registry.yarnpkg.com/send/-/send-0.17.1.tgz#c1d8b059f7900f7466dd4938bdc44e11ddb376c8" - integrity sha512-BsVKsiGcQMFwT8UxypobUKyv7irCNRHk1T0G680vk88yf6LBByGcZJOTJCrTP2xVN6yI+XjPJcNuE3V4fT9sAg== +send@0.18.0: + version "0.18.0" + resolved "https://registry.yarnpkg.com/send/-/send-0.18.0.tgz#670167cc654b05f5aa4a767f9113bb371bc706be" + integrity sha512-qqWzuOjSFOuqPjFe4NOsMLafToQQwBSOEpS+FwEt3A2V3vKubTquT3vmLTQpFgMXp8AlFWFuP1qKaJZOtPpVXg== dependencies: debug "2.6.9" - depd "~1.1.2" - destroy "~1.0.4" + depd "2.0.0" + destroy "1.2.0" encodeurl "~1.0.2" escape-html "~1.0.3" etag "~1.8.1" fresh "0.5.2" - http-errors "~1.7.2" + http-errors "2.0.0" mime "1.6.0" - ms "2.1.1" - on-finished "~2.3.0" + ms "2.1.3" + on-finished "2.4.1" range-parser "~1.2.1" - statuses "~1.5.0" + statuses "2.0.1" sentence-case@^2.1.0: version "2.1.1" @@ -10449,15 +10424,15 @@ serialize-javascript@4.0.0: dependencies: randombytes "^2.1.0" -serve-static@1.14.1: - version "1.14.1" - resolved "https://registry.yarnpkg.com/serve-static/-/serve-static-1.14.1.tgz#666e636dc4f010f7ef29970a88a674320898b2f9" - integrity sha512-JMrvUwE54emCYWlTI+hGrGv5I8dEwmco/00EvkzIIsR7MqrHonbD9pO2MOfFnpFntl7ecpZs+3mW+XbQZu9QCg== +serve-static@1.15.0: + version "1.15.0" + resolved "https://registry.yarnpkg.com/serve-static/-/serve-static-1.15.0.tgz#faaef08cffe0a1a62f60cad0c4e513cff0ac9540" + integrity sha512-XGuRDNjXUijsUL0vl6nSD7cwURuzEgglbOaFuZM9g3kwDXOWVTck0jLzjPzGD+TazWbboZYu52/9/XPdUgne9g== dependencies: encodeurl "~1.0.2" escape-html "~1.0.3" parseurl "~1.3.3" - send "0.17.1" + send "0.18.0" servify@^0.1.12: version "0.1.12" @@ -10490,11 +10465,6 @@ setimmediate@^1.0.5: resolved "https://registry.yarnpkg.com/setimmediate/-/setimmediate-1.0.5.tgz#290cbb232e306942d7d7ea9b83732ab7856f8285" integrity sha512-MATJdZp8sLqDl/68LfQmbP8zKPLQNV6BIZoIgrscFDQ+RsvK/BxeDQOgyxKKoh0y/8h3BqVFnCqQ/gd+reiIXA== -setprototypeof@1.1.1: - version "1.1.1" - resolved "https://registry.yarnpkg.com/setprototypeof/-/setprototypeof-1.1.1.tgz#7e95acb24aa92f5885e0abef5ba131330d4ae683" - integrity sha512-JvdAWfbXeIGaZ9cILp38HntZSFSo3mWg6xGcJJsd+d4aRMOqauag1C63dJfDw7OaMYwEbHMOxEZ1lqVRYP2OAw== - setprototypeof@1.2.0: version "1.2.0" resolved "https://registry.yarnpkg.com/setprototypeof/-/setprototypeof-1.2.0.tgz#66c9a24a73f9fc28cbe66b09fed3d33dcaf1b424" @@ -10745,7 +10715,12 @@ stable@~0.1.8: resolved "https://registry.yarnpkg.com/stable/-/stable-0.1.8.tgz#836eb3c8382fe2936feaf544631017ce7d47a3cf" integrity 
sha512-ji9qxRnOVfcuLDySj9qzhGSEFVobyt1kIOSkj1qZzYLzq7Tos/oUUWvotUPQLlrsidqsK6tBH89Bc9kL5zHA6w== -"statuses@>= 1.5.0 < 2", statuses@~1.5.0: +statuses@2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/statuses/-/statuses-2.0.1.tgz#55cb000ccf1d48728bd23c685a063998cf1a1b63" + integrity sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ== + +"statuses@>= 1.5.0 < 2": version "1.5.0" resolved "https://registry.yarnpkg.com/statuses/-/statuses-1.5.0.tgz#161c7dac177659fd9811f43771fa99381478628c" integrity sha1-Fhx9rBd2Wf2YEfQ3cfqZOBR4Yow= @@ -11257,6 +11232,11 @@ toidentifier@1.0.0: resolved "https://registry.yarnpkg.com/toidentifier/-/toidentifier-1.0.0.tgz#7e1be3470f1e77948bc43d94a3c8f4d7752ba553" integrity sha512-yaOH/Pk/VEhBWWTlhI+qXxDFXlejDGcQipMlyxda9nthulaxLZUNcUqFxokp0vcYnvteJln5FNQDRrxj3YcbVw== +toidentifier@1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/toidentifier/-/toidentifier-1.0.1.tgz#3be34321a88a820ed1bd80dfaa33e479fbb8dd35" + integrity sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA== + tough-cookie@^2.2.0, tough-cookie@^2.3.1, tough-cookie@~2.5.0: version "2.5.0" resolved "https://registry.yarnpkg.com/tough-cookie/-/tough-cookie-2.5.0.tgz#cd9fb2a0aa1d5a12b473bd9fb96fa3dcff65ade2" @@ -11356,7 +11336,7 @@ type-check@~0.3.2: dependencies: prelude-ls "~1.1.2" -type-is@^1.6.16, type-is@~1.6.17, type-is@~1.6.18: +type-is@^1.6.16, type-is@~1.6.18: version "1.6.18" resolved "https://registry.yarnpkg.com/type-is/-/type-is-1.6.18.tgz#4e552cd05df09467dcbc4ef739de89f2cf37c131" integrity sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g== @@ -11490,7 +11470,7 @@ unixify@1.0.0: unpipe@1.0.0, unpipe@~1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/unpipe/-/unpipe-1.0.0.tgz#b2bf4ee8514aae6165b4817829d21b2ef49904ec" - integrity sha1-sr9O6FFKrmFltIF4KdIbLvSZBOw= + integrity sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ== upper-case-first@^1.1.0, upper-case-first@^1.1.2: version "1.1.2" @@ -11599,7 +11579,7 @@ util@^0.12.0: utils-merge@1.0.1: version "1.0.1" resolved "https://registry.yarnpkg.com/utils-merge/-/utils-merge-1.0.1.tgz#9f95710f50a267947b2ccc124741c1028427e713" - integrity sha1-n5VxD1CiZ5R7LMwSR0HBAoQn5xM= + integrity sha512-pMZTvIkT1d+TFGvDOqodOclx0QWkkgi6Tdoa8gC8ffGAAqz9pzPTZWAybbsHHoED/ztMtkv/VoYTYyShUn81hA== uuid@2.0.1: version "2.0.1" @@ -11657,7 +11637,7 @@ varint@^5.0.0, varint@~5.0.0: vary@^1, vary@~1.1.2: version "1.1.2" resolved "https://registry.yarnpkg.com/vary/-/vary-1.1.2.tgz#2299f02c6ded30d4a5961b0b9f74524a18f634fc" - integrity sha1-IpnwLG3tMNSllhsLn3RSShj2NPw= + integrity sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg== verror@1.10.0: version "1.10.0" diff --git a/tests/integration-tests/config.simple.toml b/tests/runner-tests/config.simple.toml similarity index 100% rename from tests/integration-tests/config.simple.toml rename to tests/runner-tests/config.simple.toml diff --git a/tests/integration-tests/data-source-revert/abis/Contract.abi b/tests/runner-tests/data-source-revert/abis/Contract.abi similarity index 100% rename from tests/integration-tests/data-source-revert/abis/Contract.abi rename to tests/runner-tests/data-source-revert/abis/Contract.abi diff --git a/tests/runner-tests/data-source-revert/grafted.yaml b/tests/runner-tests/data-source-revert/grafted.yaml new file mode 100644 index 
00000000000..96703b41964 --- /dev/null +++ b/tests/runner-tests/data-source-revert/grafted.yaml @@ -0,0 +1,66 @@ +specVersion: 0.0.4 +features: + - grafting +schema: + file: ./schema.graphql +graft: + # Must match the id from building `subgraph.yaml` + base: QmX8y4Vwg7pqEMa94GmuT8RRRTJNVKdQTT6Yq8Zw3Vvpd6 + block: 3 +dataSources: + - kind: ethereum/contract + name: Contract + network: test + source: + address: "0xCfEB869F69431e42cdB54A4F4f105C19C080A601" + abi: Contract + mapping: + kind: ethereum/events + apiVersion: 0.0.6 + language: wasm/assemblyscript + entities: + - Gravatar + abis: + - name: Contract + file: ./abis/Contract.abi + blockHandlers: + - handler: handleBlock + file: ./src/mapping.ts + # Tests that adding a data source is possible in a graft + - kind: ethereum/contract + name: Contract2 + network: test + source: + address: "0xCfEB869F69431e42cdB54A4F4f105C19C080A601" + abi: Contract + mapping: + kind: ethereum/events + apiVersion: 0.0.6 + language: wasm/assemblyscript + entities: + - Gravatar + abis: + - name: Contract + file: ./abis/Contract.abi + callHandlers: + - handler: handleBlock + function: emitTrigger(uint16) + file: ./src/mapping.ts +templates: + - kind: ethereum/contract + name: Template + network: test + source: + abi: Contract + mapping: + kind: ethereum/events + apiVersion: 0.0.6 + language: wasm/assemblyscript + entities: + - Gravatar + abis: + - name: Contract + file: ./abis/Contract.abi + blockHandlers: + - handler: handleBlockTemplate + file: ./src/mapping.ts diff --git a/tests/integration-tests/data-source-revert/package.json b/tests/runner-tests/data-source-revert/package.json similarity index 91% rename from tests/integration-tests/data-source-revert/package.json rename to tests/runner-tests/data-source-revert/package.json index 73208954aa4..813b91748ef 100644 --- a/tests/integration-tests/data-source-revert/package.json +++ b/tests/runner-tests/data-source-revert/package.json @@ -2,7 +2,7 @@ "name": "data-source-revert", "version": "0.1.0", "scripts": { - "codegen": "graph codegen", + "codegen": "graph codegen --skip-migrations", "deploy:test": "graph deploy test/data-source-revert --version-label v0.0.1 --ipfs $IPFS_URI --node $GRAPH_NODE_ADMIN_URI", "deploy:test-grafted": "graph deploy test/data-source-revert-grafted grafted.yaml --version-label v0.0.1 --ipfs $IPFS_URI --node $GRAPH_NODE_ADMIN_URI" }, @@ -10,4 +10,4 @@ "@graphprotocol/graph-cli": "https://github.com/graphprotocol/graph-cli#main", "@graphprotocol/graph-ts": "https://github.com/graphprotocol/graph-ts#main" } -} \ No newline at end of file +} diff --git a/tests/integration-tests/data-source-revert/schema.graphql b/tests/runner-tests/data-source-revert/schema.graphql similarity index 100% rename from tests/integration-tests/data-source-revert/schema.graphql rename to tests/runner-tests/data-source-revert/schema.graphql diff --git a/tests/integration-tests/data-source-revert/src/mapping.ts b/tests/runner-tests/data-source-revert/src/mapping.ts similarity index 96% rename from tests/integration-tests/data-source-revert/src/mapping.ts rename to tests/runner-tests/data-source-revert/src/mapping.ts index 36b326f6110..feb6f313bbc 100644 --- a/tests/integration-tests/data-source-revert/src/mapping.ts +++ b/tests/runner-tests/data-source-revert/src/mapping.ts @@ -11,6 +11,7 @@ import { DataSourceCount } from "../generated/schema"; export function handleBlock(block: ethereum.Block): void { let context = new DataSourceContext(); context.setBigInt("number", block.number); + context.setBytes("hash", 
block.hash); Template.createWithContext( changetype<Address>
(Address.fromHexString( diff --git a/tests/integration-tests/data-source-revert/subgraph.yaml b/tests/runner-tests/data-source-revert/subgraph.yaml similarity index 100% rename from tests/integration-tests/data-source-revert/subgraph.yaml rename to tests/runner-tests/data-source-revert/subgraph.yaml diff --git a/tests/integration-tests/fatal-error/abis/Contract.abi b/tests/runner-tests/data-source-revert2/abis/Contract.abi similarity index 100% rename from tests/integration-tests/fatal-error/abis/Contract.abi rename to tests/runner-tests/data-source-revert2/abis/Contract.abi diff --git a/tests/runner-tests/data-source-revert2/package.json b/tests/runner-tests/data-source-revert2/package.json new file mode 100644 index 00000000000..50c61bc7d79 --- /dev/null +++ b/tests/runner-tests/data-source-revert2/package.json @@ -0,0 +1,12 @@ +{ + "name": "data-source-revert2", + "version": "0.1.0", + "scripts": { + "codegen": "graph codegen --skip-migrations", + "deploy:test": "graph deploy test/data-source-revert2 --version-label v0.0.1 --ipfs $IPFS_URI --node $GRAPH_NODE_ADMIN_URI" + }, + "devDependencies": { + "@graphprotocol/graph-cli": "https://github.com/graphprotocol/graph-cli#main", + "@graphprotocol/graph-ts": "https://github.com/graphprotocol/graph-ts#main" + } +} diff --git a/tests/runner-tests/data-source-revert2/schema.graphql b/tests/runner-tests/data-source-revert2/schema.graphql new file mode 100644 index 00000000000..6c007b3245b --- /dev/null +++ b/tests/runner-tests/data-source-revert2/schema.graphql @@ -0,0 +1,5 @@ +# The `id` is the block number and `count` the handler invocations at that block. +type DataSourceCount @entity { + id: ID! + count: Int! +} diff --git a/tests/runner-tests/data-source-revert2/src/mapping.ts b/tests/runner-tests/data-source-revert2/src/mapping.ts new file mode 100644 index 00000000000..feb6f313bbc --- /dev/null +++ b/tests/runner-tests/data-source-revert2/src/mapping.ts @@ -0,0 +1,39 @@ +import { + ethereum, + DataSourceContext, + dataSource, + Address, + BigInt, +} from "@graphprotocol/graph-ts"; +import { Template } from "../generated/templates"; +import { DataSourceCount } from "../generated/schema"; + +export function handleBlock(block: ethereum.Block): void { + let context = new DataSourceContext(); + context.setBigInt("number", block.number); + context.setBytes("hash", block.hash); + + Template.createWithContext( + changetype
<Address>(Address.fromHexString( + "0x2E645469f354BB4F5c8a05B3b30A929361cf77eC" + )), + context + ); +} + +export function handleBlockTemplate(block: ethereum.Block): void { + let count = DataSourceCount.load(block.number.toString()); + if (count == null) { + count = new DataSourceCount(block.number.toString()); + count.count = 0; + } + + let ctx = dataSource.context(); + let number = ctx.getBigInt("number"); + assert( + count.count == number.toI32(), + "wrong count, found " + BigInt.fromI32(count.count).toString() + ); + count.count += 1; + count.save(); +} diff --git a/tests/integration-tests/data-source-revert/grafted.yaml b/tests/runner-tests/data-source-revert2/subgraph.yaml similarity index 84% rename from tests/integration-tests/data-source-revert/grafted.yaml rename to tests/runner-tests/data-source-revert2/subgraph.yaml index 64e9180a1fc..52e02b01478 100644 --- a/tests/integration-tests/data-source-revert/grafted.yaml +++ b/tests/runner-tests/data-source-revert2/subgraph.yaml @@ -1,12 +1,6 @@ specVersion: 0.0.4 -features: - - grafting schema: file: ./schema.graphql -graft: - # Must match the id from building `subgraph.yaml` - base: QmW6kCVyaV3d37Pm9ZZKZAWU9G6oSooJiarjBKrqLE9FJy - block: 3 dataSources: - kind: ethereum/contract name: Contract @@ -19,7 +13,7 @@ dataSources: apiVersion: 0.0.6 language: wasm/assemblyscript entities: - - Gravatar + - Gravatar2 abis: - name: Contract file: ./abis/Contract.abi diff --git a/tests/integration-tests/typename/abis/Contract.abi b/tests/runner-tests/dynamic-data-source/abis/Contract.abi similarity index 100% rename from tests/integration-tests/typename/abis/Contract.abi rename to tests/runner-tests/dynamic-data-source/abis/Contract.abi diff --git a/tests/runner-tests/dynamic-data-source/package.json b/tests/runner-tests/dynamic-data-source/package.json new file mode 100644 index 00000000000..7e31db5c444 --- /dev/null +++ b/tests/runner-tests/dynamic-data-source/package.json @@ -0,0 +1,25 @@ +{ + "name": "dynamic-data-source", + "version": "0.1.0", + "scripts": { + "build-contracts": "../../common/build-contracts.sh", + "codegen": "graph codegen --skip-migrations", + "test": "yarn build-contracts && truffle test --compile-none --network test", + "create:test": "graph create test/dynamic-data-source --node $GRAPH_NODE_ADMIN_URI", + "deploy:test": "graph deploy test/dynamic-data-source --version-label v0.0.1 --ipfs $IPFS_URI --node $GRAPH_NODE_ADMIN_URI" + }, + "devDependencies": { + "@graphprotocol/graph-cli": "https://github.com/graphprotocol/graph-cli#main", + "@graphprotocol/graph-ts": "https://github.com/graphprotocol/graph-ts#main", + "solc": "^0.8.2" + }, + "dependencies": { + "@truffle/contract": "^4.3", + "@truffle/hdwallet-provider": "^1.2", + "apollo-fetch": "^0.7.0", + "babel-polyfill": "^6.26.0", + "babel-register": "^6.26.0", + "gluegun": "^4.6.1", + "truffle": "^5.2" + } +} diff --git a/tests/runner-tests/dynamic-data-source/schema.graphql b/tests/runner-tests/dynamic-data-source/schema.graphql new file mode 100644 index 00000000000..e5356472879 --- /dev/null +++ b/tests/runner-tests/dynamic-data-source/schema.graphql @@ -0,0 +1,4 @@ +type Foo @entity { + id: ID! + value: String!
+} diff --git a/tests/runner-tests/dynamic-data-source/src/mapping.ts b/tests/runner-tests/dynamic-data-source/src/mapping.ts new file mode 100644 index 00000000000..ec69d2136b3 --- /dev/null +++ b/tests/runner-tests/dynamic-data-source/src/mapping.ts @@ -0,0 +1,16 @@ +import { Trigger } from "../generated/Contract/Contract"; +import {Foo} from "../generated/schema"; + + +export function handleTrigger(event: Trigger): void { + let id = `${event.block.hash.toHexString()}${event.address.toHexString()}`; + let foo = new Foo(id); + foo.save(); +} + + + + + + + diff --git a/tests/integration-tests/file-data-sources/subgraph.yaml b/tests/runner-tests/dynamic-data-source/subgraph.yaml similarity index 62% rename from tests/integration-tests/file-data-sources/subgraph.yaml rename to tests/runner-tests/dynamic-data-source/subgraph.yaml index d13fefe163c..3f7efbaaca0 100644 --- a/tests/integration-tests/file-data-sources/subgraph.yaml +++ b/tests/runner-tests/dynamic-data-source/subgraph.yaml @@ -1,4 +1,4 @@ -specVersion: 0.0.7 +specVersion: 0.0.4 schema: file: ./schema.graphql dataSources: @@ -10,27 +10,33 @@ dataSources: abi: Contract mapping: kind: ethereum/events - apiVersion: 0.0.7 + apiVersion: 0.0.6 language: wasm/assemblyscript - entities: - - Gravatar abis: - name: Contract file: ./abis/Contract.abi - blockHandlers: - - handler: handleBlock + entities: + - Call + eventHandlers: + - event: Trigger(uint16) + handler: handleTrigger file: ./src/mapping.ts templates: - - kind: file/ipfs - name: File + - kind: ethereum/contract + name: Dynamic + network: test + source: + abi: Contract mapping: kind: ethereum/events - apiVersion: 0.0.7 + apiVersion: 0.0.6 language: wasm/assemblyscript - entities: - - Gravatar abis: - name: Contract file: ./abis/Contract.abi - handler: handleFile + entities: + - Call + eventHandlers: + - event: Trigger(uint16) + handler: handleTrigger file: ./src/mapping.ts diff --git a/tests/runner-tests/fatal-error/abis/Contract.abi b/tests/runner-tests/fatal-error/abis/Contract.abi new file mode 100644 index 00000000000..02da1a9e7f3 --- /dev/null +++ b/tests/runner-tests/fatal-error/abis/Contract.abi @@ -0,0 +1,33 @@ +[ + { + "inputs": [], + "stateMutability": "nonpayable", + "type": "constructor" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": false, + "internalType": "uint16", + "name": "x", + "type": "uint16" + } + ], + "name": "Trigger", + "type": "event" + }, + { + "inputs": [ + { + "internalType": "uint16", + "name": "x", + "type": "uint16" + } + ], + "name": "emitTrigger", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + } +] diff --git a/tests/integration-tests/fatal-error/package.json b/tests/runner-tests/fatal-error/package.json similarity index 67% rename from tests/integration-tests/fatal-error/package.json rename to tests/runner-tests/fatal-error/package.json index 17d5eb2fe1e..2df8ce04bf0 100644 --- a/tests/integration-tests/fatal-error/package.json +++ b/tests/runner-tests/fatal-error/package.json @@ -2,9 +2,7 @@ "name": "fatal-error", "version": "0.1.0", "scripts": { - "build-contracts": "../common/build-contracts.sh", - "codegen": "graph codegen", - "test": "yarn build-contracts && truffle test --compile-none --network test", + "codegen": "graph codegen --skip-migrations", "create:test": "graph create test/fatal-error --node $GRAPH_NODE_ADMIN_URI", "deploy:test": "graph deploy test/fatal-error --version-label v0.0.1 --ipfs $IPFS_URI --node $GRAPH_NODE_ADMIN_URI" }, @@ -14,12 +12,9 @@ "solc": "^0.8.2" }, "dependencies": { - 
"@truffle/contract": "^4.3", - "@truffle/hdwallet-provider": "^1.2", "apollo-fetch": "^0.7.0", "babel-polyfill": "^6.26.0", "babel-register": "^6.26.0", - "gluegun": "^4.6.1", - "truffle": "^5.2" + "gluegun": "^4.6.1" } } diff --git a/tests/integration-tests/fatal-error/schema.graphql b/tests/runner-tests/fatal-error/schema.graphql similarity index 100% rename from tests/integration-tests/fatal-error/schema.graphql rename to tests/runner-tests/fatal-error/schema.graphql diff --git a/tests/runner-tests/fatal-error/src/mapping.ts b/tests/runner-tests/fatal-error/src/mapping.ts new file mode 100644 index 00000000000..4ff6693322e --- /dev/null +++ b/tests/runner-tests/fatal-error/src/mapping.ts @@ -0,0 +1,7 @@ +import { ethereum } from "@graphprotocol/graph-ts"; + +export function handleBlock(block: ethereum.Block): void { + if (block.number.toI32() == 3) { + assert(false) + } +} diff --git a/tests/integration-tests/fatal-error/subgraph.yaml b/tests/runner-tests/fatal-error/subgraph.yaml similarity index 76% rename from tests/integration-tests/fatal-error/subgraph.yaml rename to tests/runner-tests/fatal-error/subgraph.yaml index 09e3122864c..431e173ec70 100644 --- a/tests/integration-tests/fatal-error/subgraph.yaml +++ b/tests/runner-tests/fatal-error/subgraph.yaml @@ -1,4 +1,4 @@ -specVersion: 0.0.2 +specVersion: 0.0.4 schema: file: ./schema.graphql dataSources: @@ -10,14 +10,13 @@ dataSources: abi: Contract mapping: kind: ethereum/events - apiVersion: 0.0.5 + apiVersion: 0.0.6 language: wasm/assemblyscript abis: - name: Contract file: ./abis/Contract.abi entities: - Call - eventHandlers: - - event: Trigger(uint16) - handler: handleTrigger + blockHandlers: + - handler: handleBlock file: ./src/mapping.ts diff --git a/tests/runner-tests/file-data-sources/abis/Contract.abi b/tests/runner-tests/file-data-sources/abis/Contract.abi new file mode 100644 index 00000000000..9d9f56b9263 --- /dev/null +++ b/tests/runner-tests/file-data-sources/abis/Contract.abi @@ -0,0 +1,15 @@ +[ + { + "anonymous": false, + "inputs": [ + { + "indexed": false, + "internalType": "string", + "name": "testCommand", + "type": "string" + } + ], + "name": "TestEvent", + "type": "event" + } +] diff --git a/tests/integration-tests/file-data-sources/package.json b/tests/runner-tests/file-data-sources/package.json similarity index 90% rename from tests/integration-tests/file-data-sources/package.json rename to tests/runner-tests/file-data-sources/package.json index 7c68f0411a7..5aa79b0f6b3 100644 --- a/tests/integration-tests/file-data-sources/package.json +++ b/tests/runner-tests/file-data-sources/package.json @@ -2,7 +2,7 @@ "name": "file-data-sources", "version": "0.1.0", "scripts": { - "codegen": "graph codegen", + "codegen": "graph codegen --skip-migrations", "create:test": "graph create test/file-data-sources --node $GRAPH_NODE_ADMIN_URI", "deploy:test": "graph deploy test/file-data-sources --version-label v0.0.1 --ipfs $IPFS_URI --node $GRAPH_NODE_ADMIN_URI" }, @@ -10,4 +10,4 @@ "@graphprotocol/graph-cli": "https://github.com/graphprotocol/graph-cli#main", "@graphprotocol/graph-ts": "https://github.com/graphprotocol/graph-ts#main" } -} \ No newline at end of file +} diff --git a/tests/runner-tests/file-data-sources/schema.graphql b/tests/runner-tests/file-data-sources/schema.graphql new file mode 100644 index 00000000000..48408ae69a2 --- /dev/null +++ b/tests/runner-tests/file-data-sources/schema.graphql @@ -0,0 +1,9 @@ +type IpfsFile @entity { + id: ID! + content: String! +} + +type IpfsFile1 @entity { + id: ID! 
+ content: String! +} \ No newline at end of file diff --git a/tests/runner-tests/file-data-sources/src/mapping.ts b/tests/runner-tests/file-data-sources/src/mapping.ts new file mode 100644 index 00000000000..dbd8013908d --- /dev/null +++ b/tests/runner-tests/file-data-sources/src/mapping.ts @@ -0,0 +1,87 @@ +import { ethereum, dataSource, BigInt, Bytes, DataSourceContext } from '@graphprotocol/graph-ts' +import { TestEvent } from '../generated/Contract/Contract' +import { IpfsFile, IpfsFile1 } from '../generated/schema' + +// CID of `file-data-sources/abis/Contract.abi` after being processed by graph-cli. +const KNOWN_CID = "QmQ2REmceVtzawp7yrnxLQXgNNCtFHEnig6fL9aqE1kcWq" + +export function handleBlock(block: ethereum.Block): void { + let entity = new IpfsFile("onchain") + entity.content = "onchain" + entity.save() + + // This will create the same data source twice, once at block 0 and another at block 2. + // The creation at block 2 should be detected as a duplicate and therefore a noop. + if (block.number == BigInt.fromI32(0) || block.number == BigInt.fromI32(2)) { + dataSource.create("File", [KNOWN_CID]) + } + + if (block.number == BigInt.fromI32(1)) { + let entity = IpfsFile.load("onchain")! + assert(entity.content == "onchain") + + // The test assumes file data sources are processed in the block in which they are created. + // So the ds created at block 0 will have been processed. + // + // Test that onchain data sources cannot read offchain data. + assert(IpfsFile.load(KNOWN_CID) == null); + + // Test that using an invalid CID will be ignored + dataSource.create("File", ["hi, I'm not valid"]) + } + + + // This will invoke File1 data source with same CID, which will be used + // to test whether same cid is triggered across different data source. + if (block.number == BigInt.fromI32(3)) { + // Test that onchain data sources cannot read offchain data (again, but this time more likely to hit the DB than the write queue). + assert(IpfsFile.load(KNOWN_CID) == null); + + dataSource.create("File1", [KNOWN_CID]) + } +} + +export function handleTestEvent(event: TestEvent): void { + let command = event.params.testCommand; + + if (command == "createFile2") { + // Will fail the subgraph when processed due to mismatch in the entity type and 'entities'. + dataSource.create("File2", [KNOWN_CID]) + } else if (command == "saveConflictingEntity") { + // Will fail the subgraph because the same entity has been created in a file data source. + let entity = new IpfsFile(KNOWN_CID) + entity.content = "empty" + entity.save() + } else if (command == "createFile1") { + // Will fail the subgraph with a conflict between two entities created by offchain data sources. + let context = new DataSourceContext(); + context.setBytes("hash", event.block.hash); + dataSource.createWithContext("File1", [KNOWN_CID], context) + } else { + assert(false, "Unknown command: " + command); + } +} + +export function handleFile(data: Bytes): void { + // Test that offchain data sources cannot read onchain data. + assert(IpfsFile.load("onchain") == null); + + if (dataSource.stringParam() != "QmVkvoPGi9jvvuxsHDVJDgzPEzagBaWSZRYoRDzU244HjZ") { + // Test that an offchain data source cannot read from another offchain data source. 
+ assert(IpfsFile.load("QmVkvoPGi9jvvuxsHDVJDgzPEzagBaWSZRYoRDzU244HjZ") == null); + } + + let entity = new IpfsFile(dataSource.stringParam()) + entity.content = data.toString() + entity.save() + + // Test that an offchain data source can load its own entities + let loaded_entity = IpfsFile.load(dataSource.stringParam())! + assert(loaded_entity.content == entity.content) +} + +export function handleFile1(data: Bytes): void { + let entity = new IpfsFile1(dataSource.stringParam()) + entity.content = data.toString() + entity.save() +} diff --git a/tests/runner-tests/file-data-sources/subgraph.yaml b/tests/runner-tests/file-data-sources/subgraph.yaml new file mode 100644 index 00000000000..502faccdff6 --- /dev/null +++ b/tests/runner-tests/file-data-sources/subgraph.yaml @@ -0,0 +1,65 @@ +specVersion: 0.0.7 +schema: + file: ./schema.graphql +dataSources: + - kind: ethereum/contract + name: Contract + network: test + source: + address: "0x0000000000000000000000000000000000000000" + abi: Contract + mapping: + kind: ethereum/events + apiVersion: 0.0.7 + language: wasm/assemblyscript + entities: + - Gravatar + abis: + - name: Contract + file: ./abis/Contract.abi + blockHandlers: + - handler: handleBlock + eventHandlers: + - event: TestEvent(string) + handler: handleTestEvent + file: ./src/mapping.ts +templates: + - kind: file/ipfs + name: File + mapping: + kind: ethereum/events + apiVersion: 0.0.7 + language: wasm/assemblyscript + entities: + - IpfsFile + abis: + - name: Contract + file: ./abis/Contract.abi + handler: handleFile + file: ./src/mapping.ts + - kind: file/ipfs + name: File1 + mapping: + kind: ethereum/events + apiVersion: 0.0.7 + language: wasm/assemblyscript + entities: + - IpfsFile1 + abis: + - name: Contract + file: ./abis/Contract.abi + handler: handleFile1 + file: ./src/mapping.ts + - kind: file/ipfs + name: File2 + mapping: + kind: ethereum/events + apiVersion: 0.0.7 + language: wasm/assemblyscript + entities: + - IpfsFile # will trigger an error, should be IpfsFile1 + abis: + - name: Contract + file: ./abis/Contract.abi + handler: handleFile1 + file: ./src/mapping.ts \ No newline at end of file diff --git a/tests/runner-tests/package.json b/tests/runner-tests/package.json new file mode 100644 index 00000000000..c35ae39ad3d --- /dev/null +++ b/tests/runner-tests/package.json @@ -0,0 +1,11 @@ +{ + "private": true, + "workspaces": [ + "data-source-revert", + "data-source-revert2", + "dynamic-data-source", + "fatal-error", + "file-data-sources", + "typename" + ] +} diff --git a/tests/runner-tests/typename/abis/Contract.abi b/tests/runner-tests/typename/abis/Contract.abi new file mode 100644 index 00000000000..02da1a9e7f3 --- /dev/null +++ b/tests/runner-tests/typename/abis/Contract.abi @@ -0,0 +1,33 @@ +[ + { + "inputs": [], + "stateMutability": "nonpayable", + "type": "constructor" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": false, + "internalType": "uint16", + "name": "x", + "type": "uint16" + } + ], + "name": "Trigger", + "type": "event" + }, + { + "inputs": [ + { + "internalType": "uint16", + "name": "x", + "type": "uint16" + } + ], + "name": "emitTrigger", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + } +] diff --git a/tests/integration-tests/typename/package.json b/tests/runner-tests/typename/package.json similarity index 90% rename from tests/integration-tests/typename/package.json rename to tests/runner-tests/typename/package.json index ee6d5e7c70a..4d4d46b63e6 100644 --- a/tests/integration-tests/typename/package.json +++ 
b/tests/runner-tests/typename/package.json @@ -2,7 +2,7 @@ "name": "typename", "version": "0.1.0", "scripts": { - "codegen": "graph codegen", + "codegen": "graph codegen --skip-migrations", "create:test": "graph create test/typename --node $GRAPH_NODE_ADMIN_URI", "deploy:test": "graph deploy test/typename --version-label v0.0.1 --ipfs $IPFS_URI --node $GRAPH_NODE_ADMIN_URI" }, diff --git a/tests/integration-tests/typename/schema.graphql b/tests/runner-tests/typename/schema.graphql similarity index 100% rename from tests/integration-tests/typename/schema.graphql rename to tests/runner-tests/typename/schema.graphql diff --git a/tests/integration-tests/typename/src/mapping.ts b/tests/runner-tests/typename/src/mapping.ts similarity index 100% rename from tests/integration-tests/typename/src/mapping.ts rename to tests/runner-tests/typename/src/mapping.ts diff --git a/tests/integration-tests/typename/subgraph.yaml b/tests/runner-tests/typename/subgraph.yaml similarity index 96% rename from tests/integration-tests/typename/subgraph.yaml rename to tests/runner-tests/typename/subgraph.yaml index cc95116e077..1c6ff2eb186 100644 --- a/tests/integration-tests/typename/subgraph.yaml +++ b/tests/runner-tests/typename/subgraph.yaml @@ -13,7 +13,7 @@ dataSources: abi: Contract mapping: kind: ethereum/events - apiVersion: 0.0.5 + apiVersion: 0.0.6 language: wasm/assemblyscript entities: - ExampleEntity diff --git a/tests/runner-tests/yarn.lock b/tests/runner-tests/yarn.lock new file mode 100644 index 00000000000..df7ba6fc49a --- /dev/null +++ b/tests/runner-tests/yarn.lock @@ -0,0 +1,12557 @@ +# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY. +# yarn lockfile v1 + + +"@apollo/client@^3.1.5": + version "3.3.11" + resolved "https://registry.yarnpkg.com/@apollo/client/-/client-3.3.11.tgz#125051405e83dc899d471d43b79fd6045d92a802" + integrity sha512-54+D5FB6RJlQ+g37f432gaexnyvDsG5X6L9VO5kqN54HJlbF8hCf/8CXtAQEHCWodAwZhy6kOLp2RM96829q3A== + dependencies: + "@graphql-typed-document-node/core" "^3.0.0" + "@types/zen-observable" "^0.8.0" + "@wry/context" "^0.5.2" + "@wry/equality" "^0.3.0" + fast-json-stable-stringify "^2.0.0" + graphql-tag "^2.12.0" + hoist-non-react-statics "^3.3.2" + optimism "^0.14.0" + prop-types "^15.7.2" + symbol-observable "^2.0.0" + ts-invariant "^0.6.0" + tslib "^1.10.0" + zen-observable "^0.8.14" + +"@apollo/protobufjs@1.2.2": + version "1.2.2" + resolved "https://registry.yarnpkg.com/@apollo/protobufjs/-/protobufjs-1.2.2.tgz#4bd92cd7701ccaef6d517cdb75af2755f049f87c" + integrity sha512-vF+zxhPiLtkwxONs6YanSt1EpwpGilThpneExUN5K3tCymuxNnVq2yojTvnpRjv2QfsEIt/n7ozPIIzBLwGIDQ== + dependencies: + "@protobufjs/aspromise" "^1.1.2" + "@protobufjs/base64" "^1.1.2" + "@protobufjs/codegen" "^2.0.4" + "@protobufjs/eventemitter" "^1.1.0" + "@protobufjs/fetch" "^1.1.0" + "@protobufjs/float" "^1.0.2" + "@protobufjs/inquire" "^1.1.0" + "@protobufjs/path" "^1.1.2" + "@protobufjs/pool" "^1.1.0" + "@protobufjs/utf8" "^1.1.0" + "@types/long" "^4.0.0" + "@types/node" "^10.1.0" + long "^4.0.0" + +"@apollographql/apollo-tools@^0.5.0": + version "0.5.2" + resolved "https://registry.yarnpkg.com/@apollographql/apollo-tools/-/apollo-tools-0.5.2.tgz#01750a655731a198c3634ee819c463254a7c7767" + integrity sha512-KxZiw0Us3k1d0YkJDhOpVH5rJ+mBfjXcgoRoCcslbgirjgLotKMzOcx4PZ7YTEvvEROmvG7X3Aon41GvMmyGsw== + +"@apollographql/graphql-playground-html@1.6.27": + version "1.6.27" + resolved 
"https://registry.yarnpkg.com/@apollographql/graphql-playground-html/-/graphql-playground-html-1.6.27.tgz#bc9ab60e9445aa2a8813b4e94f152fa72b756335" + integrity sha512-tea2LweZvn6y6xFV11K0KC8ETjmm52mQrW+ezgB2O/aTQf8JGyFmMcRPFgUaQZeHbWdm8iisDC6EjOKsXu0nfw== + dependencies: + xss "^1.0.8" + +"@apollographql/graphql-upload-8-fork@^8.1.3": + version "8.1.3" + resolved "https://registry.yarnpkg.com/@apollographql/graphql-upload-8-fork/-/graphql-upload-8-fork-8.1.3.tgz#a0d4e0d5cec8e126d78bd915c264d6b90f5784bc" + integrity sha512-ssOPUT7euLqDXcdVv3Qs4LoL4BPtfermW1IOouaqEmj36TpHYDmYDIbKoSQxikd9vtMumFnP87OybH7sC9fJ6g== + dependencies: + "@types/express" "*" + "@types/fs-capacitor" "*" + "@types/koa" "*" + busboy "^0.3.1" + fs-capacitor "^2.0.4" + http-errors "^1.7.3" + object-path "^0.11.4" + +"@ardatan/aggregate-error@0.0.6": + version "0.0.6" + resolved "https://registry.yarnpkg.com/@ardatan/aggregate-error/-/aggregate-error-0.0.6.tgz#fe6924771ea40fc98dc7a7045c2e872dc8527609" + integrity sha512-vyrkEHG1jrukmzTPtyWB4NLPauUw5bQeg4uhn8f+1SSynmrOcyvlb1GKQjjgoBzElLdfXCRYX8UnBlhklOHYRQ== + dependencies: + tslib "~2.0.1" + +"@babel/code-frame@^7.0.0": + version "7.18.6" + resolved "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.18.6.tgz#3b25d38c89600baa2dcc219edfa88a74eb2c427a" + integrity sha512-TDCmlK5eOvH+eH7cdAFlNXeVJqWIQ7gW9tY1GJIpUtFb6CmjVyq2VM3u71bOyR8CRihcCgMUYoDNyLXao3+70Q== + dependencies: + "@babel/highlight" "^7.18.6" + +"@babel/code-frame@^7.12.13": + version "7.12.13" + resolved "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.12.13.tgz#dcfc826beef65e75c50e21d3837d7d95798dd658" + integrity sha512-HV1Cm0Q3ZrpCR93tkWOYiuYIgLxZXZFVG2VgK+MBWjUqZTundupbfx2aXarXuw5Ko5aMcjtJgbSs4vUGBS5v6g== + dependencies: + "@babel/highlight" "^7.12.13" + +"@babel/compat-data@^7.13.0", "@babel/compat-data@^7.13.8": + version "7.13.8" + resolved "https://registry.yarnpkg.com/@babel/compat-data/-/compat-data-7.13.8.tgz#5b783b9808f15cef71547f1b691f34f8ff6003a6" + integrity sha512-EaI33z19T4qN3xLXsGf48M2cDqa6ei9tPZlfLdb2HC+e/cFtREiRd8hdSqDbwdLB0/+gLwqJmCYASH0z2bUdog== + +"@babel/core@^7.0.0": + version "7.13.10" + resolved "https://registry.yarnpkg.com/@babel/core/-/core-7.13.10.tgz#07de050bbd8193fcd8a3c27918c0890613a94559" + integrity sha512-bfIYcT0BdKeAZrovpMqX2Mx5NrgAckGbwT982AkdS5GNfn3KMGiprlBAtmBcFZRUmpaufS6WZFP8trvx8ptFDw== + dependencies: + "@babel/code-frame" "^7.12.13" + "@babel/generator" "^7.13.9" + "@babel/helper-compilation-targets" "^7.13.10" + "@babel/helper-module-transforms" "^7.13.0" + "@babel/helpers" "^7.13.10" + "@babel/parser" "^7.13.10" + "@babel/template" "^7.12.13" + "@babel/traverse" "^7.13.0" + "@babel/types" "^7.13.0" + convert-source-map "^1.7.0" + debug "^4.1.0" + gensync "^1.0.0-beta.2" + json5 "^2.1.2" + lodash "^4.17.19" + semver "^6.3.0" + source-map "^0.5.0" + +"@babel/generator@^7.12.13", "@babel/generator@^7.13.0", "@babel/generator@^7.13.9", "@babel/generator@^7.5.0": + version "7.13.9" + resolved "https://registry.yarnpkg.com/@babel/generator/-/generator-7.13.9.tgz#3a7aa96f9efb8e2be42d38d80e2ceb4c64d8de39" + integrity sha512-mHOOmY0Axl/JCTkxTU6Lf5sWOg/v8nUa+Xkt4zMTftX0wqmb6Sh7J8gvcehBw7q0AhrhAR+FDacKjCZ2X8K+Sw== + dependencies: + "@babel/types" "^7.13.0" + jsesc "^2.5.1" + source-map "^0.5.0" + +"@babel/helper-annotate-as-pure@^7.12.13": + version "7.12.13" + resolved "https://registry.yarnpkg.com/@babel/helper-annotate-as-pure/-/helper-annotate-as-pure-7.12.13.tgz#0f58e86dfc4bb3b1fcd7db806570e177d439b6ab" + integrity 
sha512-7YXfX5wQ5aYM/BOlbSccHDbuXXFPxeoUmfWtz8le2yTkTZc+BxsiEnENFoi2SlmA8ewDkG2LgIMIVzzn2h8kfw== + dependencies: + "@babel/types" "^7.12.13" + +"@babel/helper-compilation-targets@^7.13.0", "@babel/helper-compilation-targets@^7.13.10", "@babel/helper-compilation-targets@^7.13.8": + version "7.13.10" + resolved "https://registry.yarnpkg.com/@babel/helper-compilation-targets/-/helper-compilation-targets-7.13.10.tgz#1310a1678cb8427c07a753750da4f8ce442bdd0c" + integrity sha512-/Xju7Qg1GQO4mHZ/Kcs6Au7gfafgZnwm+a7sy/ow/tV1sHeraRUHbjdat8/UvDor4Tez+siGKDk6zIKtCPKVJA== + dependencies: + "@babel/compat-data" "^7.13.8" + "@babel/helper-validator-option" "^7.12.17" + browserslist "^4.14.5" + semver "^6.3.0" + +"@babel/helper-create-class-features-plugin@^7.13.0": + version "7.13.10" + resolved "https://registry.yarnpkg.com/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.13.10.tgz#073b2bbb925a097643c6fc5770e5f13394e887c9" + integrity sha512-YV7r2YxdTUaw84EwNkyrRke/TJHR/UXGiyvACRqvdVJ2/syV2rQuJNnaRLSuYiop8cMRXOgseTGoJCWX0q2fFg== + dependencies: + "@babel/helper-function-name" "^7.12.13" + "@babel/helper-member-expression-to-functions" "^7.13.0" + "@babel/helper-optimise-call-expression" "^7.12.13" + "@babel/helper-replace-supers" "^7.13.0" + "@babel/helper-split-export-declaration" "^7.12.13" + +"@babel/helper-define-polyfill-provider@^0.1.5": + version "0.1.5" + resolved "https://registry.yarnpkg.com/@babel/helper-define-polyfill-provider/-/helper-define-polyfill-provider-0.1.5.tgz#3c2f91b7971b9fc11fe779c945c014065dea340e" + integrity sha512-nXuzCSwlJ/WKr8qxzW816gwyT6VZgiJG17zR40fou70yfAcqjoNyTLl/DQ+FExw5Hx5KNqshmN8Ldl/r2N7cTg== + dependencies: + "@babel/helper-compilation-targets" "^7.13.0" + "@babel/helper-module-imports" "^7.12.13" + "@babel/helper-plugin-utils" "^7.13.0" + "@babel/traverse" "^7.13.0" + debug "^4.1.1" + lodash.debounce "^4.0.8" + resolve "^1.14.2" + semver "^6.1.2" + +"@babel/helper-function-name@^7.12.13": + version "7.12.13" + resolved "https://registry.yarnpkg.com/@babel/helper-function-name/-/helper-function-name-7.12.13.tgz#93ad656db3c3c2232559fd7b2c3dbdcbe0eb377a" + integrity sha512-TZvmPn0UOqmvi5G4vvw0qZTpVptGkB1GL61R6lKvrSdIxGm5Pky7Q3fpKiIkQCAtRCBUwB0PaThlx9vebCDSwA== + dependencies: + "@babel/helper-get-function-arity" "^7.12.13" + "@babel/template" "^7.12.13" + "@babel/types" "^7.12.13" + +"@babel/helper-get-function-arity@^7.12.13": + version "7.12.13" + resolved "https://registry.yarnpkg.com/@babel/helper-get-function-arity/-/helper-get-function-arity-7.12.13.tgz#bc63451d403a3b3082b97e1d8b3fe5bd4091e583" + integrity sha512-DjEVzQNz5LICkzN0REdpD5prGoidvbdYk1BVgRUOINaWJP2t6avB27X1guXK1kXNrX0WMfsrm1A/ZBthYuIMQg== + dependencies: + "@babel/types" "^7.12.13" + +"@babel/helper-member-expression-to-functions@^7.13.0": + version "7.13.0" + resolved "https://registry.yarnpkg.com/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.13.0.tgz#6aa4bb678e0f8c22f58cdb79451d30494461b091" + integrity sha512-yvRf8Ivk62JwisqV1rFRMxiSMDGnN6KH1/mDMmIrij4jztpQNRoHqqMG3U6apYbGRPJpgPalhva9Yd06HlUxJQ== + dependencies: + "@babel/types" "^7.13.0" + +"@babel/helper-module-imports@^7.12.13": + version "7.12.13" + resolved "https://registry.yarnpkg.com/@babel/helper-module-imports/-/helper-module-imports-7.12.13.tgz#ec67e4404f41750463e455cc3203f6a32e93fcb0" + integrity sha512-NGmfvRp9Rqxy0uHSSVP+SRIW1q31a7Ji10cLBcqSDUngGentY4FRiHOFZFE1CLU5eiL0oE8reH7Tg1y99TDM/g== + dependencies: + "@babel/types" "^7.12.13" + 
+"@babel/helper-module-transforms@^7.13.0": + version "7.13.0" + resolved "https://registry.yarnpkg.com/@babel/helper-module-transforms/-/helper-module-transforms-7.13.0.tgz#42eb4bd8eea68bab46751212c357bfed8b40f6f1" + integrity sha512-Ls8/VBwH577+pw7Ku1QkUWIyRRNHpYlts7+qSqBBFCW3I8QteB9DxfcZ5YJpOwH6Ihe/wn8ch7fMGOP1OhEIvw== + dependencies: + "@babel/helper-module-imports" "^7.12.13" + "@babel/helper-replace-supers" "^7.13.0" + "@babel/helper-simple-access" "^7.12.13" + "@babel/helper-split-export-declaration" "^7.12.13" + "@babel/helper-validator-identifier" "^7.12.11" + "@babel/template" "^7.12.13" + "@babel/traverse" "^7.13.0" + "@babel/types" "^7.13.0" + lodash "^4.17.19" + +"@babel/helper-optimise-call-expression@^7.12.13": + version "7.12.13" + resolved "https://registry.yarnpkg.com/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.12.13.tgz#5c02d171b4c8615b1e7163f888c1c81c30a2aaea" + integrity sha512-BdWQhoVJkp6nVjB7nkFWcn43dkprYauqtk++Py2eaf/GRDFm5BxRqEIZCiHlZUGAVmtwKcsVL1dC68WmzeFmiA== + dependencies: + "@babel/types" "^7.12.13" + +"@babel/helper-plugin-utils@^7.12.13", "@babel/helper-plugin-utils@^7.13.0", "@babel/helper-plugin-utils@^7.8.0": + version "7.13.0" + resolved "https://registry.yarnpkg.com/@babel/helper-plugin-utils/-/helper-plugin-utils-7.13.0.tgz#806526ce125aed03373bc416a828321e3a6a33af" + integrity sha512-ZPafIPSwzUlAoWT8DKs1W2VyF2gOWthGd5NGFMsBcMMol+ZhK+EQY/e6V96poa6PA/Bh+C9plWN0hXO1uB8AfQ== + +"@babel/helper-replace-supers@^7.12.13", "@babel/helper-replace-supers@^7.13.0": + version "7.13.0" + resolved "https://registry.yarnpkg.com/@babel/helper-replace-supers/-/helper-replace-supers-7.13.0.tgz#6034b7b51943094cb41627848cb219cb02be1d24" + integrity sha512-Segd5me1+Pz+rmN/NFBOplMbZG3SqRJOBlY+mA0SxAv6rjj7zJqr1AVr3SfzUVTLCv7ZLU5FycOM/SBGuLPbZw== + dependencies: + "@babel/helper-member-expression-to-functions" "^7.13.0" + "@babel/helper-optimise-call-expression" "^7.12.13" + "@babel/traverse" "^7.13.0" + "@babel/types" "^7.13.0" + +"@babel/helper-simple-access@^7.12.13": + version "7.12.13" + resolved "https://registry.yarnpkg.com/@babel/helper-simple-access/-/helper-simple-access-7.12.13.tgz#8478bcc5cacf6aa1672b251c1d2dde5ccd61a6c4" + integrity sha512-0ski5dyYIHEfwpWGx5GPWhH35j342JaflmCeQmsPWcrOQDtCN6C1zKAVRFVbK53lPW2c9TsuLLSUDf0tIGJ5hA== + dependencies: + "@babel/types" "^7.12.13" + +"@babel/helper-skip-transparent-expression-wrappers@^7.12.1": + version "7.12.1" + resolved "https://registry.yarnpkg.com/@babel/helper-skip-transparent-expression-wrappers/-/helper-skip-transparent-expression-wrappers-7.12.1.tgz#462dc63a7e435ade8468385c63d2b84cce4b3cbf" + integrity sha512-Mf5AUuhG1/OCChOJ/HcADmvcHM42WJockombn8ATJG3OnyiSxBK/Mm5x78BQWvmtXZKHgbjdGL2kin/HOLlZGA== + dependencies: + "@babel/types" "^7.12.1" + +"@babel/helper-split-export-declaration@^7.12.13": + version "7.12.13" + resolved "https://registry.yarnpkg.com/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.12.13.tgz#e9430be00baf3e88b0e13e6f9d4eaf2136372b05" + integrity sha512-tCJDltF83htUtXx5NLcaDqRmknv652ZWCHyoTETf1CXYJdPC7nohZohjUgieXhv0hTJdRf2FjDueFehdNucpzg== + dependencies: + "@babel/types" "^7.12.13" + +"@babel/helper-validator-identifier@^7.12.11", "@babel/helper-validator-identifier@^7.18.6": + version "7.18.6" + resolved "https://registry.yarnpkg.com/@babel/helper-validator-identifier/-/helper-validator-identifier-7.18.6.tgz#9c97e30d31b2b8c72a1d08984f2ca9b574d7a076" + integrity 
sha512-MmetCkz9ej86nJQV+sFCxoGGrUbU3q02kgLciwkrt9QqEB7cP39oKEY0PakknEO0Gu20SskMRi+AYZ3b1TpN9g== + +"@babel/helper-validator-option@^7.12.17": + version "7.12.17" + resolved "https://registry.yarnpkg.com/@babel/helper-validator-option/-/helper-validator-option-7.12.17.tgz#d1fbf012e1a79b7eebbfdc6d270baaf8d9eb9831" + integrity sha512-TopkMDmLzq8ngChwRlyjR6raKD6gMSae4JdYDB8bByKreQgG0RBTuKe9LRxW3wFtUnjxOPRKBDwEH6Mg5KeDfw== + +"@babel/helpers@^7.13.10": + version "7.13.10" + resolved "https://registry.yarnpkg.com/@babel/helpers/-/helpers-7.13.10.tgz#fd8e2ba7488533cdeac45cc158e9ebca5e3c7df8" + integrity sha512-4VO883+MWPDUVRF3PhiLBUFHoX/bsLTGFpFK/HqvvfBZz2D57u9XzPVNFVBTc0PW/CWR9BXTOKt8NF4DInUHcQ== + dependencies: + "@babel/template" "^7.12.13" + "@babel/traverse" "^7.13.0" + "@babel/types" "^7.13.0" + +"@babel/highlight@^7.12.13", "@babel/highlight@^7.18.6": + version "7.18.6" + resolved "https://registry.yarnpkg.com/@babel/highlight/-/highlight-7.18.6.tgz#81158601e93e2563795adcbfbdf5d64be3f2ecdf" + integrity sha512-u7stbOuYjaPezCuLj29hNW1v64M2Md2qupEKP1fHc7WdOA3DgLh37suiSrZYY7haUB7iBeQZ9P1uiRF359do3g== + dependencies: + "@babel/helper-validator-identifier" "^7.18.6" + chalk "^2.0.0" + js-tokens "^4.0.0" + +"@babel/parser@7.12.16": + version "7.12.16" + resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.12.16.tgz#cc31257419d2c3189d394081635703f549fc1ed4" + integrity sha512-c/+u9cqV6F0+4Hpq01jnJO+GLp2DdT63ppz9Xa+6cHaajM9VFzK/iDXiKK65YtpeVwu+ctfS6iqlMqRgQRzeCw== + +"@babel/parser@^7.0.0", "@babel/parser@^7.12.13", "@babel/parser@^7.13.0", "@babel/parser@^7.13.10": + version "7.13.10" + resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.13.10.tgz#8f8f9bf7b3afa3eabd061f7a5bcdf4fec3c48409" + integrity sha512-0s7Mlrw9uTWkYua7xWr99Wpk2bnGa0ANleKfksYAES8LpWH4gW1OUr42vqKNf0us5UQNfru2wPqMqRITzq/SIQ== + +"@babel/plugin-proposal-class-properties@^7.0.0": + version "7.13.0" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-class-properties/-/plugin-proposal-class-properties-7.13.0.tgz#146376000b94efd001e57a40a88a525afaab9f37" + integrity sha512-KnTDjFNC1g+45ka0myZNvSBFLhNCLN+GeGYLDEA8Oq7MZ6yMgfLoIRh86GRT0FjtJhZw8JyUskP9uvj5pHM9Zg== + dependencies: + "@babel/helper-create-class-features-plugin" "^7.13.0" + "@babel/helper-plugin-utils" "^7.13.0" + +"@babel/plugin-proposal-object-rest-spread@^7.0.0": + version "7.13.8" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-object-rest-spread/-/plugin-proposal-object-rest-spread-7.13.8.tgz#5d210a4d727d6ce3b18f9de82cc99a3964eed60a" + integrity sha512-DhB2EuB1Ih7S3/IRX5AFVgZ16k3EzfRbq97CxAVI1KSYcW+lexV8VZb7G7L8zuPVSdQMRn0kiBpf/Yzu9ZKH0g== + dependencies: + "@babel/compat-data" "^7.13.8" + "@babel/helper-compilation-targets" "^7.13.8" + "@babel/helper-plugin-utils" "^7.13.0" + "@babel/plugin-syntax-object-rest-spread" "^7.8.3" + "@babel/plugin-transform-parameters" "^7.13.0" + +"@babel/plugin-syntax-class-properties@^7.0.0": + version "7.12.13" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-class-properties/-/plugin-syntax-class-properties-7.12.13.tgz#b5c987274c4a3a82b89714796931a6b53544ae10" + integrity sha512-fm4idjKla0YahUNgFNLCB0qySdsoPiZP3iQE3rky0mBUtMZ23yDJ9SJdg6dXTSDnulOVqiF3Hgr9nbXvXTQZYA== + dependencies: + "@babel/helper-plugin-utils" "^7.12.13" + +"@babel/plugin-syntax-flow@^7.0.0", "@babel/plugin-syntax-flow@^7.12.13": + version "7.12.13" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-flow/-/plugin-syntax-flow-7.12.13.tgz#5df9962503c0a9c918381c929d51d4d6949e7e86" + integrity 
sha512-J/RYxnlSLXZLVR7wTRsozxKT8qbsx1mNKJzXEEjQ0Kjx1ZACcyHgbanNWNCFtc36IzuWhYWPpvJFFoexoOWFmA== + dependencies: + "@babel/helper-plugin-utils" "^7.12.13" + +"@babel/plugin-syntax-jsx@^7.0.0", "@babel/plugin-syntax-jsx@^7.12.13": + version "7.12.13" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.12.13.tgz#044fb81ebad6698fe62c478875575bcbb9b70f15" + integrity sha512-d4HM23Q1K7oq/SLNmG6mRt85l2csmQ0cHRaxRXjKW0YFdEXqlZ5kzFQKH5Uc3rDJECgu+yCRgPkG04Mm98R/1g== + dependencies: + "@babel/helper-plugin-utils" "^7.12.13" + +"@babel/plugin-syntax-object-rest-spread@^7.0.0", "@babel/plugin-syntax-object-rest-spread@^7.8.3": + version "7.8.3" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-object-rest-spread/-/plugin-syntax-object-rest-spread-7.8.3.tgz#60e225edcbd98a640332a2e72dd3e66f1af55871" + integrity sha512-XoqMijGZb9y3y2XskN+P1wUGiVwWZ5JmoDRwx5+3GmEplNyVM2s2Dg8ILFQm8rWM48orGy5YpI5Bl8U1y7ydlA== + dependencies: + "@babel/helper-plugin-utils" "^7.8.0" + +"@babel/plugin-transform-arrow-functions@^7.0.0": + version "7.13.0" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-arrow-functions/-/plugin-transform-arrow-functions-7.13.0.tgz#10a59bebad52d637a027afa692e8d5ceff5e3dae" + integrity sha512-96lgJagobeVmazXFaDrbmCLQxBysKu7U6Do3mLsx27gf5Dk85ezysrs2BZUpXD703U/Su1xTBDxxar2oa4jAGg== + dependencies: + "@babel/helper-plugin-utils" "^7.13.0" + +"@babel/plugin-transform-block-scoped-functions@^7.0.0": + version "7.12.13" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-block-scoped-functions/-/plugin-transform-block-scoped-functions-7.12.13.tgz#a9bf1836f2a39b4eb6cf09967739de29ea4bf4c4" + integrity sha512-zNyFqbc3kI/fVpqwfqkg6RvBgFpC4J18aKKMmv7KdQ/1GgREapSJAykLMVNwfRGO3BtHj3YQZl8kxCXPcVMVeg== + dependencies: + "@babel/helper-plugin-utils" "^7.12.13" + +"@babel/plugin-transform-block-scoping@^7.0.0": + version "7.12.13" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-block-scoping/-/plugin-transform-block-scoping-7.12.13.tgz#f36e55076d06f41dfd78557ea039c1b581642e61" + integrity sha512-Pxwe0iqWJX4fOOM2kEZeUuAxHMWb9nK+9oh5d11bsLoB0xMg+mkDpt0eYuDZB7ETrY9bbcVlKUGTOGWy7BHsMQ== + dependencies: + "@babel/helper-plugin-utils" "^7.12.13" + +"@babel/plugin-transform-classes@^7.0.0": + version "7.13.0" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-classes/-/plugin-transform-classes-7.13.0.tgz#0265155075c42918bf4d3a4053134176ad9b533b" + integrity sha512-9BtHCPUARyVH1oXGcSJD3YpsqRLROJx5ZNP6tN5vnk17N0SVf9WCtf8Nuh1CFmgByKKAIMstitKduoCmsaDK5g== + dependencies: + "@babel/helper-annotate-as-pure" "^7.12.13" + "@babel/helper-function-name" "^7.12.13" + "@babel/helper-optimise-call-expression" "^7.12.13" + "@babel/helper-plugin-utils" "^7.13.0" + "@babel/helper-replace-supers" "^7.13.0" + "@babel/helper-split-export-declaration" "^7.12.13" + globals "^11.1.0" + +"@babel/plugin-transform-computed-properties@^7.0.0": + version "7.13.0" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-computed-properties/-/plugin-transform-computed-properties-7.13.0.tgz#845c6e8b9bb55376b1fa0b92ef0bdc8ea06644ed" + integrity sha512-RRqTYTeZkZAz8WbieLTvKUEUxZlUTdmL5KGMyZj7FnMfLNKV4+r5549aORG/mgojRmFlQMJDUupwAMiF2Q7OUg== + dependencies: + "@babel/helper-plugin-utils" "^7.13.0" + +"@babel/plugin-transform-destructuring@^7.0.0": + version "7.13.0" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-destructuring/-/plugin-transform-destructuring-7.13.0.tgz#c5dce270014d4e1ebb1d806116694c12b7028963" + integrity 
sha512-zym5em7tePoNT9s964c0/KU3JPPnuq7VhIxPRefJ4/s82cD+q1mgKfuGRDMCPL0HTyKz4dISuQlCusfgCJ86HA== + dependencies: + "@babel/helper-plugin-utils" "^7.13.0" + +"@babel/plugin-transform-flow-strip-types@^7.0.0": + version "7.13.0" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-flow-strip-types/-/plugin-transform-flow-strip-types-7.13.0.tgz#58177a48c209971e8234e99906cb6bd1122addd3" + integrity sha512-EXAGFMJgSX8gxWD7PZtW/P6M+z74jpx3wm/+9pn+c2dOawPpBkUX7BrfyPvo6ZpXbgRIEuwgwDb/MGlKvu2pOg== + dependencies: + "@babel/helper-plugin-utils" "^7.13.0" + "@babel/plugin-syntax-flow" "^7.12.13" + +"@babel/plugin-transform-for-of@^7.0.0": + version "7.13.0" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-for-of/-/plugin-transform-for-of-7.13.0.tgz#c799f881a8091ac26b54867a845c3e97d2696062" + integrity sha512-IHKT00mwUVYE0zzbkDgNRP6SRzvfGCYsOxIRz8KsiaaHCcT9BWIkO+H9QRJseHBLOGBZkHUdHiqj6r0POsdytg== + dependencies: + "@babel/helper-plugin-utils" "^7.13.0" + +"@babel/plugin-transform-function-name@^7.0.0": + version "7.12.13" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-function-name/-/plugin-transform-function-name-7.12.13.tgz#bb024452f9aaed861d374c8e7a24252ce3a50051" + integrity sha512-6K7gZycG0cmIwwF7uMK/ZqeCikCGVBdyP2J5SKNCXO5EOHcqi+z7Jwf8AmyDNcBgxET8DrEtCt/mPKPyAzXyqQ== + dependencies: + "@babel/helper-function-name" "^7.12.13" + "@babel/helper-plugin-utils" "^7.12.13" + +"@babel/plugin-transform-literals@^7.0.0": + version "7.12.13" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-literals/-/plugin-transform-literals-7.12.13.tgz#2ca45bafe4a820197cf315794a4d26560fe4bdb9" + integrity sha512-FW+WPjSR7hiUxMcKqyNjP05tQ2kmBCdpEpZHY1ARm96tGQCCBvXKnpjILtDplUnJ/eHZ0lALLM+d2lMFSpYJrQ== + dependencies: + "@babel/helper-plugin-utils" "^7.12.13" + +"@babel/plugin-transform-member-expression-literals@^7.0.0": + version "7.12.13" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-member-expression-literals/-/plugin-transform-member-expression-literals-7.12.13.tgz#5ffa66cd59b9e191314c9f1f803b938e8c081e40" + integrity sha512-kxLkOsg8yir4YeEPHLuO2tXP9R/gTjpuTOjshqSpELUN3ZAg2jfDnKUvzzJxObun38sw3wm4Uu69sX/zA7iRvg== + dependencies: + "@babel/helper-plugin-utils" "^7.12.13" + +"@babel/plugin-transform-modules-commonjs@^7.0.0": + version "7.13.8" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.13.8.tgz#7b01ad7c2dcf2275b06fa1781e00d13d420b3e1b" + integrity sha512-9QiOx4MEGglfYZ4XOnU79OHr6vIWUakIj9b4mioN8eQIoEh+pf5p/zEB36JpDFWA12nNMiRf7bfoRvl9Rn79Bw== + dependencies: + "@babel/helper-module-transforms" "^7.13.0" + "@babel/helper-plugin-utils" "^7.13.0" + "@babel/helper-simple-access" "^7.12.13" + babel-plugin-dynamic-import-node "^2.3.3" + +"@babel/plugin-transform-object-super@^7.0.0": + version "7.12.13" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-object-super/-/plugin-transform-object-super-7.12.13.tgz#b4416a2d63b8f7be314f3d349bd55a9c1b5171f7" + integrity sha512-JzYIcj3XtYspZDV8j9ulnoMPZZnF/Cj0LUxPOjR89BdBVx+zYJI9MdMIlUZjbXDX+6YVeS6I3e8op+qQ3BYBoQ== + dependencies: + "@babel/helper-plugin-utils" "^7.12.13" + "@babel/helper-replace-supers" "^7.12.13" + +"@babel/plugin-transform-parameters@^7.0.0", "@babel/plugin-transform-parameters@^7.13.0": + version "7.13.0" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-parameters/-/plugin-transform-parameters-7.13.0.tgz#8fa7603e3097f9c0b7ca1a4821bc2fb52e9e5007" + integrity 
sha512-Jt8k/h/mIwE2JFEOb3lURoY5C85ETcYPnbuAJ96zRBzh1XHtQZfs62ChZ6EP22QlC8c7Xqr9q+e1SU5qttwwjw== + dependencies: + "@babel/helper-plugin-utils" "^7.13.0" + +"@babel/plugin-transform-property-literals@^7.0.0": + version "7.12.13" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-property-literals/-/plugin-transform-property-literals-7.12.13.tgz#4e6a9e37864d8f1b3bc0e2dce7bf8857db8b1a81" + integrity sha512-nqVigwVan+lR+g8Fj8Exl0UQX2kymtjcWfMOYM1vTYEKujeyv2SkMgazf2qNcK7l4SDiKyTA/nHCPqL4e2zo1A== + dependencies: + "@babel/helper-plugin-utils" "^7.12.13" + +"@babel/plugin-transform-react-display-name@^7.0.0": + version "7.12.13" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-react-display-name/-/plugin-transform-react-display-name-7.12.13.tgz#c28effd771b276f4647411c9733dbb2d2da954bd" + integrity sha512-MprESJzI9O5VnJZrL7gg1MpdqmiFcUv41Jc7SahxYsNP2kDkFqClxxTZq+1Qv4AFCamm+GXMRDQINNn+qrxmiA== + dependencies: + "@babel/helper-plugin-utils" "^7.12.13" + +"@babel/plugin-transform-react-jsx@^7.0.0": + version "7.12.17" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-react-jsx/-/plugin-transform-react-jsx-7.12.17.tgz#dd2c1299f5e26de584939892de3cfc1807a38f24" + integrity sha512-mwaVNcXV+l6qJOuRhpdTEj8sT/Z0owAVWf9QujTZ0d2ye9X/K+MTOTSizcgKOj18PGnTc/7g1I4+cIUjsKhBcw== + dependencies: + "@babel/helper-annotate-as-pure" "^7.12.13" + "@babel/helper-module-imports" "^7.12.13" + "@babel/helper-plugin-utils" "^7.12.13" + "@babel/plugin-syntax-jsx" "^7.12.13" + "@babel/types" "^7.12.17" + +"@babel/plugin-transform-runtime@^7.5.5": + version "7.13.10" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-runtime/-/plugin-transform-runtime-7.13.10.tgz#a1e40d22e2bf570c591c9c7e5ab42d6bf1e419e1" + integrity sha512-Y5k8ipgfvz5d/76tx7JYbKQTcgFSU6VgJ3kKQv4zGTKr+a9T/KBvfRvGtSFgKDQGt/DBykQixV0vNWKIdzWErA== + dependencies: + "@babel/helper-module-imports" "^7.12.13" + "@babel/helper-plugin-utils" "^7.13.0" + babel-plugin-polyfill-corejs2 "^0.1.4" + babel-plugin-polyfill-corejs3 "^0.1.3" + babel-plugin-polyfill-regenerator "^0.1.2" + semver "^6.3.0" + +"@babel/plugin-transform-shorthand-properties@^7.0.0": + version "7.12.13" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-shorthand-properties/-/plugin-transform-shorthand-properties-7.12.13.tgz#db755732b70c539d504c6390d9ce90fe64aff7ad" + integrity sha512-xpL49pqPnLtf0tVluuqvzWIgLEhuPpZzvs2yabUHSKRNlN7ScYU7aMlmavOeyXJZKgZKQRBlh8rHbKiJDraTSw== + dependencies: + "@babel/helper-plugin-utils" "^7.12.13" + +"@babel/plugin-transform-spread@^7.0.0": + version "7.13.0" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-spread/-/plugin-transform-spread-7.13.0.tgz#84887710e273c1815ace7ae459f6f42a5d31d5fd" + integrity sha512-V6vkiXijjzYeFmQTr3dBxPtZYLPcUfY34DebOU27jIl2M/Y8Egm52Hw82CSjjPqd54GTlJs5x+CR7HeNr24ckg== + dependencies: + "@babel/helper-plugin-utils" "^7.13.0" + "@babel/helper-skip-transparent-expression-wrappers" "^7.12.1" + +"@babel/plugin-transform-template-literals@^7.0.0": + version "7.13.0" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-template-literals/-/plugin-transform-template-literals-7.13.0.tgz#a36049127977ad94438dee7443598d1cefdf409d" + integrity sha512-d67umW6nlfmr1iehCcBv69eSUSySk1EsIS8aTDX4Xo9qajAh6mYtcl4kJrBkGXuxZPEgVr7RVfAvNW6YQkd4Mw== + dependencies: + "@babel/helper-plugin-utils" "^7.13.0" + +"@babel/runtime@^7.0.0", "@babel/runtime@^7.11.2", "@babel/runtime@^7.5.5", "@babel/runtime@^7.6.3": + version "7.13.10" + resolved 
"https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.13.10.tgz#47d42a57b6095f4468da440388fdbad8bebf0d7d" + integrity sha512-4QPkjJq6Ns3V/RgpEahRk+AGfL0eO6RHHtTWoNNr5mO49G6B5+X6d6THgWEAvTrznU5xYpbAlVKRYcsCgh/Akw== + dependencies: + regenerator-runtime "^0.13.4" + +"@babel/runtime@^7.9.2": + version "7.18.9" + resolved "https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.18.9.tgz#b4fcfce55db3d2e5e080d2490f608a3b9f407f4a" + integrity sha512-lkqXDcvlFT5rvEjiu6+QYO+1GXrEHRo2LOtS7E4GtX5ESIZOgepqsZBVIj6Pv+a6zqsya9VCgiK1KAK4BvJDAw== + dependencies: + regenerator-runtime "^0.13.4" + +"@babel/template@^7.12.13": + version "7.12.13" + resolved "https://registry.yarnpkg.com/@babel/template/-/template-7.12.13.tgz#530265be8a2589dbb37523844c5bcb55947fb327" + integrity sha512-/7xxiGA57xMo/P2GVvdEumr8ONhFOhfgq2ihK3h1e6THqzTAkHbkXgB0xI9yeTfIUoH3+oAeHhqm/I43OTbbjA== + dependencies: + "@babel/code-frame" "^7.12.13" + "@babel/parser" "^7.12.13" + "@babel/types" "^7.12.13" + +"@babel/traverse@7.12.13": + version "7.12.13" + resolved "https://registry.yarnpkg.com/@babel/traverse/-/traverse-7.12.13.tgz#689f0e4b4c08587ad26622832632735fb8c4e0c0" + integrity sha512-3Zb4w7eE/OslI0fTp8c7b286/cQps3+vdLW3UcwC8VSJC6GbKn55aeVVu2QJNuCDoeKyptLOFrPq8WqZZBodyA== + dependencies: + "@babel/code-frame" "^7.12.13" + "@babel/generator" "^7.12.13" + "@babel/helper-function-name" "^7.12.13" + "@babel/helper-split-export-declaration" "^7.12.13" + "@babel/parser" "^7.12.13" + "@babel/types" "^7.12.13" + debug "^4.1.0" + globals "^11.1.0" + lodash "^4.17.19" + +"@babel/traverse@^7.0.0", "@babel/traverse@^7.13.0": + version "7.13.0" + resolved "https://registry.yarnpkg.com/@babel/traverse/-/traverse-7.13.0.tgz#6d95752475f86ee7ded06536de309a65fc8966cc" + integrity sha512-xys5xi5JEhzC3RzEmSGrs/b3pJW/o87SypZ+G/PhaE7uqVQNv/jlmVIBXuoh5atqQ434LfXV+sf23Oxj0bchJQ== + dependencies: + "@babel/code-frame" "^7.12.13" + "@babel/generator" "^7.13.0" + "@babel/helper-function-name" "^7.12.13" + "@babel/helper-split-export-declaration" "^7.12.13" + "@babel/parser" "^7.13.0" + "@babel/types" "^7.13.0" + debug "^4.1.0" + globals "^11.1.0" + lodash "^4.17.19" + +"@babel/types@7.12.13": + version "7.12.13" + resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.12.13.tgz#8be1aa8f2c876da11a9cf650c0ecf656913ad611" + integrity sha512-oKrdZTld2im1z8bDwTOQvUbxKwE+854zc16qWZQlcTqMN00pWxHQ4ZeOq0yDMnisOpRykH2/5Qqcrk/OlbAjiQ== + dependencies: + "@babel/helper-validator-identifier" "^7.12.11" + lodash "^4.17.19" + to-fast-properties "^2.0.0" + +"@babel/types@^7.0.0", "@babel/types@^7.12.1", "@babel/types@^7.12.13", "@babel/types@^7.12.17", "@babel/types@^7.13.0": + version "7.13.0" + resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.13.0.tgz#74424d2816f0171b4100f0ab34e9a374efdf7f80" + integrity sha512-hE+HE8rnG1Z6Wzo+MhaKE5lM5eMx71T4EHJgku2E3xIfaULhDcxiiRxUYgwX8qwP1BBSlag+TdGOt6JAidIZTA== + dependencies: + "@babel/helper-validator-identifier" "^7.12.11" + lodash "^4.17.19" + to-fast-properties "^2.0.0" + +"@ethersproject/abi@5.0.0-beta.153": + version "5.0.0-beta.153" + resolved "https://registry.yarnpkg.com/@ethersproject/abi/-/abi-5.0.0-beta.153.tgz#43a37172b33794e4562999f6e2d555b7599a8eee" + integrity sha512-aXweZ1Z7vMNzJdLpR1CZUAIgnwjrZeUSvN9syCwlBaEBUFJmFY+HHnfuTI5vIhVs/mRkfJVrbEyl51JZQqyjAg== + dependencies: + "@ethersproject/address" ">=5.0.0-beta.128" + "@ethersproject/bignumber" ">=5.0.0-beta.130" + "@ethersproject/bytes" ">=5.0.0-beta.129" + "@ethersproject/constants" ">=5.0.0-beta.128" + "@ethersproject/hash" ">=5.0.0-beta.128" 
+ "@ethersproject/keccak256" ">=5.0.0-beta.127" + "@ethersproject/logger" ">=5.0.0-beta.129" + "@ethersproject/properties" ">=5.0.0-beta.131" + "@ethersproject/strings" ">=5.0.0-beta.130" + +"@ethersproject/abi@5.0.7": + version "5.0.7" + resolved "https://registry.yarnpkg.com/@ethersproject/abi/-/abi-5.0.7.tgz#79e52452bd3ca2956d0e1c964207a58ad1a0ee7b" + integrity sha512-Cqktk+hSIckwP/W8O47Eef60VwmoSC/L3lY0+dIBhQPCNn9E4V7rwmm2aFrNRRDJfFlGuZ1khkQUOc3oBX+niw== + dependencies: + "@ethersproject/address" "^5.0.4" + "@ethersproject/bignumber" "^5.0.7" + "@ethersproject/bytes" "^5.0.4" + "@ethersproject/constants" "^5.0.4" + "@ethersproject/hash" "^5.0.4" + "@ethersproject/keccak256" "^5.0.3" + "@ethersproject/logger" "^5.0.5" + "@ethersproject/properties" "^5.0.3" + "@ethersproject/strings" "^5.0.4" + +"@ethersproject/abstract-provider@^5.7.0": + version "5.7.0" + resolved "https://registry.yarnpkg.com/@ethersproject/abstract-provider/-/abstract-provider-5.7.0.tgz#b0a8550f88b6bf9d51f90e4795d48294630cb9ef" + integrity sha512-R41c9UkchKCpAqStMYUpdunjo3pkEvZC3FAwZn5S5MGbXoMQOHIdHItezTETxAO5bevtMApSyEhn9+CHcDsWBw== + dependencies: + "@ethersproject/bignumber" "^5.7.0" + "@ethersproject/bytes" "^5.7.0" + "@ethersproject/logger" "^5.7.0" + "@ethersproject/networks" "^5.7.0" + "@ethersproject/properties" "^5.7.0" + "@ethersproject/transactions" "^5.7.0" + "@ethersproject/web" "^5.7.0" + +"@ethersproject/abstract-signer@^5.0.10", "@ethersproject/abstract-signer@^5.7.0": + version "5.7.0" + resolved "https://registry.yarnpkg.com/@ethersproject/abstract-signer/-/abstract-signer-5.7.0.tgz#13f4f32117868452191a4649723cb086d2b596b2" + integrity sha512-a16V8bq1/Cz+TGCkE2OPMTOUDLS3grCpdjoJCYNnVBbdYEMSgKrU0+B90s8b6H+ByYTBZN7a3g76jdIJi7UfKQ== + dependencies: + "@ethersproject/abstract-provider" "^5.7.0" + "@ethersproject/bignumber" "^5.7.0" + "@ethersproject/bytes" "^5.7.0" + "@ethersproject/logger" "^5.7.0" + "@ethersproject/properties" "^5.7.0" + +"@ethersproject/address@>=5.0.0-beta.128": + version "5.0.11" + resolved "https://registry.yarnpkg.com/@ethersproject/address/-/address-5.0.11.tgz#12022e8c590c33939beb5ab18b401ecf585eac59" + integrity sha512-Et4GBdD8/tsBGjCEOKee9upN29qjL5kbRcmJifb4Penmiuh9GARXL2/xpXvEp5EW+EIW/rfCHFJrkYBgoQFQBw== + dependencies: + "@ethersproject/bignumber" "^5.0.13" + "@ethersproject/bytes" "^5.0.9" + "@ethersproject/keccak256" "^5.0.7" + "@ethersproject/logger" "^5.0.8" + "@ethersproject/rlp" "^5.0.7" + +"@ethersproject/address@^5.0.4", "@ethersproject/address@^5.0.9", "@ethersproject/address@^5.7.0": + version "5.7.0" + resolved "https://registry.yarnpkg.com/@ethersproject/address/-/address-5.7.0.tgz#19b56c4d74a3b0a46bfdbb6cfcc0a153fc697f37" + integrity sha512-9wYhYt7aghVGo758POM5nqcOMaE168Q6aRLJZwUmiqSrAungkG74gSSeKEIR7ukixesdRZGPgVqme6vmxs1fkA== + dependencies: + "@ethersproject/bignumber" "^5.7.0" + "@ethersproject/bytes" "^5.7.0" + "@ethersproject/keccak256" "^5.7.0" + "@ethersproject/logger" "^5.7.0" + "@ethersproject/rlp" "^5.7.0" + +"@ethersproject/base64@^5.7.0": + version "5.7.0" + resolved "https://registry.yarnpkg.com/@ethersproject/base64/-/base64-5.7.0.tgz#ac4ee92aa36c1628173e221d0d01f53692059e1c" + integrity sha512-Dr8tcHt2mEbsZr/mwTPIQAf3Ai0Bks/7gTw9dSqk1mQvhW3XvRlmDJr/4n+wg1JmCl16NZue17CDh8xb/vZ0sQ== + dependencies: + "@ethersproject/bytes" "^5.7.0" + +"@ethersproject/bignumber@>=5.0.0-beta.130": + version "5.0.15" + resolved "https://registry.yarnpkg.com/@ethersproject/bignumber/-/bignumber-5.0.15.tgz#b089b3f1e0381338d764ac1c10512f0c93b184ed" + integrity 
sha512-MTADqnyacvdRwtKh7o9ujwNDSM1SDJjYDMYAzjIgjoi9rh6TY4suMbhCa3i2vh3SUXiXSICyTI8ui+NPdrZ9Lw== + dependencies: + "@ethersproject/bytes" "^5.0.9" + "@ethersproject/logger" "^5.0.8" + bn.js "^4.4.0" + +"@ethersproject/bignumber@^5.0.13", "@ethersproject/bignumber@^5.0.7", "@ethersproject/bignumber@^5.7.0": + version "5.7.0" + resolved "https://registry.yarnpkg.com/@ethersproject/bignumber/-/bignumber-5.7.0.tgz#e2f03837f268ba655ffba03a57853e18a18dc9c2" + integrity sha512-n1CAdIHRWjSucQO3MC1zPSVgV/6dy/fjL9pMrPP9peL+QxEg9wOsVqwD4+818B6LUEtaXzVHQiuivzRoxPxUGw== + dependencies: + "@ethersproject/bytes" "^5.7.0" + "@ethersproject/logger" "^5.7.0" + bn.js "^5.2.1" + +"@ethersproject/bytes@>=5.0.0-beta.129": + version "5.0.11" + resolved "https://registry.yarnpkg.com/@ethersproject/bytes/-/bytes-5.0.11.tgz#21118e75b1d00db068984c15530e316021101276" + integrity sha512-D51plLYY5qF05AsoVQwIZVLqlBkaTPVHVP/1WmmBIWyHB0cRW0C9kh0kx5Exo51rB63Hk8PfHxc7SmpoaQFEyg== + dependencies: + "@ethersproject/logger" "^5.0.8" + +"@ethersproject/bytes@^5.0.4", "@ethersproject/bytes@^5.0.9", "@ethersproject/bytes@^5.7.0": + version "5.7.0" + resolved "https://registry.yarnpkg.com/@ethersproject/bytes/-/bytes-5.7.0.tgz#a00f6ea8d7e7534d6d87f47188af1148d71f155d" + integrity sha512-nsbxwgFXWh9NyYWo+U8atvmMsSdKJprTcICAkvbBffT75qDocbuggBU0SJiVK2MuTrp0q+xvLkTnGMPK1+uA9A== + dependencies: + "@ethersproject/logger" "^5.7.0" + +"@ethersproject/constants@>=5.0.0-beta.128": + version "5.0.10" + resolved "https://registry.yarnpkg.com/@ethersproject/constants/-/constants-5.0.10.tgz#eb0c604fbc44c53ba9641eed31a1d0c9e1ebcadc" + integrity sha512-OSo8jxkHLDXieCy8bgOFR7lMfgPxEzKvSDdP+WAWHCDM8+orwch0B6wzkTmiQFgryAtIctrBt5glAdJikZ3hGw== + dependencies: + "@ethersproject/bignumber" "^5.0.13" + +"@ethersproject/constants@^5.0.4", "@ethersproject/constants@^5.0.8", "@ethersproject/constants@^5.7.0": + version "5.7.0" + resolved "https://registry.yarnpkg.com/@ethersproject/constants/-/constants-5.7.0.tgz#df80a9705a7e08984161f09014ea012d1c75295e" + integrity sha512-DHI+y5dBNvkpYUMiRQyxRBYBefZkJfo70VUkUAsRjcPs47muV9evftfZ0PJVCXYbAiCgght0DtcF9srFQmIgWA== + dependencies: + "@ethersproject/bignumber" "^5.7.0" + +"@ethersproject/hash@>=5.0.0-beta.128": + version "5.0.12" + resolved "https://registry.yarnpkg.com/@ethersproject/hash/-/hash-5.0.12.tgz#1074599f7509e2ca2bb7a3d4f4e39ab3a796da42" + integrity sha512-kn4QN+fhNFbUgX3XZTZUaQixi0oyfIEY+hfW+KtkHu+rq7dV76oAIvaLEEynu1/4npOL38E4X4YI42gGZk+C0Q== + dependencies: + "@ethersproject/abstract-signer" "^5.0.10" + "@ethersproject/address" "^5.0.9" + "@ethersproject/bignumber" "^5.0.13" + "@ethersproject/bytes" "^5.0.9" + "@ethersproject/keccak256" "^5.0.7" + "@ethersproject/logger" "^5.0.8" + "@ethersproject/properties" "^5.0.7" + "@ethersproject/strings" "^5.0.8" + +"@ethersproject/hash@^5.0.4": + version "5.7.0" + resolved "https://registry.yarnpkg.com/@ethersproject/hash/-/hash-5.7.0.tgz#eb7aca84a588508369562e16e514b539ba5240a7" + integrity sha512-qX5WrQfnah1EFnO5zJv1v46a8HW0+E5xuBBDTwMFZLuVTx0tbU2kkx15NqdjxecrLGatQN9FGQKpb1FKdHCt+g== + dependencies: + "@ethersproject/abstract-signer" "^5.7.0" + "@ethersproject/address" "^5.7.0" + "@ethersproject/base64" "^5.7.0" + "@ethersproject/bignumber" "^5.7.0" + "@ethersproject/bytes" "^5.7.0" + "@ethersproject/keccak256" "^5.7.0" + "@ethersproject/logger" "^5.7.0" + "@ethersproject/properties" "^5.7.0" + "@ethersproject/strings" "^5.7.0" + +"@ethersproject/keccak256@>=5.0.0-beta.127": + version "5.0.9" + resolved 
"https://registry.yarnpkg.com/@ethersproject/keccak256/-/keccak256-5.0.9.tgz#ca0d86e4af56c13b1ef25e533bde3e96d28f647d" + integrity sha512-zhdUTj6RGtCJSgU+bDrWF6cGbvW453LoIC1DSNWrTlXzC7WuH4a+EiPrgc7/kNoRxerKuA/cxYlI8GwNtVtDlw== + dependencies: + "@ethersproject/bytes" "^5.0.9" + js-sha3 "0.5.7" + +"@ethersproject/keccak256@^5.0.3", "@ethersproject/keccak256@^5.0.7", "@ethersproject/keccak256@^5.7.0": + version "5.7.0" + resolved "https://registry.yarnpkg.com/@ethersproject/keccak256/-/keccak256-5.7.0.tgz#3186350c6e1cd6aba7940384ec7d6d9db01f335a" + integrity sha512-2UcPboeL/iW+pSg6vZ6ydF8tCnv3Iu/8tUmLLzWWGzxWKFFqOBQFLo6uLUv6BDrLgCDfN28RJ/wtByx+jZ4KBg== + dependencies: + "@ethersproject/bytes" "^5.7.0" + js-sha3 "0.8.0" + +"@ethersproject/logger@>=5.0.0-beta.129": + version "5.0.10" + resolved "https://registry.yarnpkg.com/@ethersproject/logger/-/logger-5.0.10.tgz#fd884688b3143253e0356ef92d5f22d109d2e026" + integrity sha512-0y2T2NqykDrbPM3Zw9RSbPkDOxwChAL8detXaom76CfYoGxsOnRP/zTX8OUAV+x9LdwzgbWvWmeXrc0M7SuDZw== + +"@ethersproject/logger@^5.0.5", "@ethersproject/logger@^5.0.8", "@ethersproject/logger@^5.7.0": + version "5.7.0" + resolved "https://registry.yarnpkg.com/@ethersproject/logger/-/logger-5.7.0.tgz#6ce9ae168e74fecf287be17062b590852c311892" + integrity sha512-0odtFdXu/XHtjQXJYA3u9G0G8btm0ND5Cu8M7i5vhEcE8/HmF4Lbdqanwyv4uQTr2tx6b7fQRmgLrsnpQlmnig== + +"@ethersproject/networks@^5.7.0": + version "5.7.0" + resolved "https://registry.yarnpkg.com/@ethersproject/networks/-/networks-5.7.0.tgz#df72a392f1a63a57f87210515695a31a245845ad" + integrity sha512-MG6oHSQHd4ebvJrleEQQ4HhVu8Ichr0RDYEfHzsVAVjHNM+w36x9wp9r+hf1JstMXtseXDtkiVoARAG6M959AA== + dependencies: + "@ethersproject/logger" "^5.7.0" + +"@ethersproject/properties@>=5.0.0-beta.131": + version "5.0.9" + resolved "https://registry.yarnpkg.com/@ethersproject/properties/-/properties-5.0.9.tgz#d7aae634680760136ea522e25c3ef043ec15b5c2" + integrity sha512-ZCjzbHYTw+rF1Pn8FDCEmx3gQttwIHcm/6Xee8g/M3Ga3SfW4tccNMbs5zqnBH0E4RoOPaeNgyg1O68TaF0tlg== + dependencies: + "@ethersproject/logger" "^5.0.8" + +"@ethersproject/properties@^5.0.3", "@ethersproject/properties@^5.0.7", "@ethersproject/properties@^5.7.0": + version "5.7.0" + resolved "https://registry.yarnpkg.com/@ethersproject/properties/-/properties-5.7.0.tgz#a6e12cb0439b878aaf470f1902a176033067ed30" + integrity sha512-J87jy8suntrAkIZtecpxEPxY//szqr1mlBaYlQ0r4RCaiD2hjheqF9s1LVE8vVuJCXisjIP+JgtK/Do54ej4Sw== + dependencies: + "@ethersproject/logger" "^5.7.0" + +"@ethersproject/rlp@^5.0.7", "@ethersproject/rlp@^5.7.0": + version "5.7.0" + resolved "https://registry.yarnpkg.com/@ethersproject/rlp/-/rlp-5.7.0.tgz#de39e4d5918b9d74d46de93af80b7685a9c21304" + integrity sha512-rBxzX2vK8mVF7b0Tol44t5Tb8gomOHkj5guL+HhzQ1yBh/ydjGnpw6at+X6Iw0Kp3OzzzkcKp8N9r0W4kYSs9w== + dependencies: + "@ethersproject/bytes" "^5.7.0" + "@ethersproject/logger" "^5.7.0" + +"@ethersproject/signing-key@^5.0.8", "@ethersproject/signing-key@^5.7.0": + version "5.7.0" + resolved "https://registry.yarnpkg.com/@ethersproject/signing-key/-/signing-key-5.7.0.tgz#06b2df39411b00bc57c7c09b01d1e41cf1b16ab3" + integrity sha512-MZdy2nL3wO0u7gkB4nA/pEf8lu1TlFswPNmy8AiYkfKTdO6eXBJyUdmHO/ehm/htHw9K/qF8ujnTyUAD+Ry54Q== + dependencies: + "@ethersproject/bytes" "^5.7.0" + "@ethersproject/logger" "^5.7.0" + "@ethersproject/properties" "^5.7.0" + bn.js "^5.2.1" + elliptic "6.5.4" + hash.js "1.1.7" + +"@ethersproject/strings@>=5.0.0-beta.130": + version "5.0.10" + resolved 
"https://registry.yarnpkg.com/@ethersproject/strings/-/strings-5.0.10.tgz#ddce1e9724f4ac4f3f67e0cac0b48748e964bfdb" + integrity sha512-KAeoS1tZ9/5ECXiIZA6S6hywbD0so2VmuW+Wfyo5EDXeyZ6Na1nxTPhTnW7voQmjbeYJffCrOc0qLFJeylyg7w== + dependencies: + "@ethersproject/bytes" "^5.0.9" + "@ethersproject/constants" "^5.0.8" + "@ethersproject/logger" "^5.0.8" + +"@ethersproject/strings@^5.0.4", "@ethersproject/strings@^5.0.8", "@ethersproject/strings@^5.7.0": + version "5.7.0" + resolved "https://registry.yarnpkg.com/@ethersproject/strings/-/strings-5.7.0.tgz#54c9d2a7c57ae8f1205c88a9d3a56471e14d5ed2" + integrity sha512-/9nu+lj0YswRNSH0NXYqrh8775XNyEdUQAuf3f+SmOrnVewcJ5SBNAjF7lpgehKi4abvNNXyf+HX86czCdJ8Mg== + dependencies: + "@ethersproject/bytes" "^5.7.0" + "@ethersproject/constants" "^5.7.0" + "@ethersproject/logger" "^5.7.0" + +"@ethersproject/transactions@^5.0.0-beta.135": + version "5.0.11" + resolved "https://registry.yarnpkg.com/@ethersproject/transactions/-/transactions-5.0.11.tgz#b31df5292f47937136a45885d6ee6112477c13df" + integrity sha512-ftsRvR9+gQp7L63F6+XmstvsZ4w8GtWvQB08e/zB+oB86Fnhq8+i/tkgpJplSHC8I/qgiCisva+M3u2GVhDFPA== + dependencies: + "@ethersproject/address" "^5.0.9" + "@ethersproject/bignumber" "^5.0.13" + "@ethersproject/bytes" "^5.0.9" + "@ethersproject/constants" "^5.0.8" + "@ethersproject/keccak256" "^5.0.7" + "@ethersproject/logger" "^5.0.8" + "@ethersproject/properties" "^5.0.7" + "@ethersproject/rlp" "^5.0.7" + "@ethersproject/signing-key" "^5.0.8" + +"@ethersproject/transactions@^5.7.0": + version "5.7.0" + resolved "https://registry.yarnpkg.com/@ethersproject/transactions/-/transactions-5.7.0.tgz#91318fc24063e057885a6af13fdb703e1f993d3b" + integrity sha512-kmcNicCp1lp8qanMTC3RIikGgoJ80ztTyvtsFvCYpSCfkjhD0jZ2LOrnbcuxuToLIUYYf+4XwD1rP+B/erDIhQ== + dependencies: + "@ethersproject/address" "^5.7.0" + "@ethersproject/bignumber" "^5.7.0" + "@ethersproject/bytes" "^5.7.0" + "@ethersproject/constants" "^5.7.0" + "@ethersproject/keccak256" "^5.7.0" + "@ethersproject/logger" "^5.7.0" + "@ethersproject/properties" "^5.7.0" + "@ethersproject/rlp" "^5.7.0" + "@ethersproject/signing-key" "^5.7.0" + +"@ethersproject/web@^5.7.0": + version "5.7.0" + resolved "https://registry.yarnpkg.com/@ethersproject/web/-/web-5.7.0.tgz#40850c05260edad8b54827923bbad23d96aac0bc" + integrity sha512-ApHcbbj+muRASVDSCl/tgxaH2LBkRMEYfLOLVa0COipx0+nlu0QKet7U2lEg0vdkh8XRSLf2nd1f1Uk9SrVSGA== + dependencies: + "@ethersproject/base64" "^5.7.0" + "@ethersproject/bytes" "^5.7.0" + "@ethersproject/logger" "^5.7.0" + "@ethersproject/properties" "^5.7.0" + "@ethersproject/strings" "^5.7.0" + +"@graphprotocol/graph-cli@https://github.com/graphprotocol/graph-cli#main": + version "0.33.0" + resolved "https://github.com/graphprotocol/graph-cli#47e075a9701680580e0e8e09c5444963224dbf5c" + dependencies: + assemblyscript "0.19.10" + binary-install-raw "0.0.13" + chalk "3.0.0" + chokidar "3.5.1" + debug "4.3.1" + docker-compose "0.23.4" + dockerode "2.5.8" + fs-extra "9.0.0" + glob "7.1.6" + gluegun "https://github.com/edgeandnode/gluegun#v4.3.1-pin-colors-dep" + graphql "15.5.0" + immutable "3.8.2" + ipfs-http-client "34.0.0" + jayson "3.6.6" + js-yaml "3.13.1" + node-fetch "2.6.0" + pkginfo "0.4.1" + prettier "1.19.1" + request "2.88.2" + semver "7.3.5" + sync-request "6.1.0" + tmp-promise "3.0.2" + web3-eth-abi "1.7.0" + which "2.0.2" + yaml "1.9.2" + +"@graphprotocol/graph-ts@https://github.com/graphprotocol/graph-ts#main": + version "0.28.1" + resolved 
"https://github.com/graphprotocol/graph-ts#4e91d2c0b695c7689aba205516d3e80fb5588454" + dependencies: + assemblyscript "0.19.10" + +"@graphql-tools/batch-delegate@^6.2.4", "@graphql-tools/batch-delegate@^6.2.6": + version "6.2.6" + resolved "https://registry.yarnpkg.com/@graphql-tools/batch-delegate/-/batch-delegate-6.2.6.tgz#fbea98dc825f87ef29ea5f3f371912c2a2aa2f2c" + integrity sha512-QUoE9pQtkdNPFdJHSnBhZtUfr3M7pIRoXoMR+TG7DK2Y62ISKbT/bKtZEUU1/2v5uqd5WVIvw9dF8gHDSJAsSA== + dependencies: + "@graphql-tools/delegate" "^6.2.4" + dataloader "2.0.0" + tslib "~2.0.1" + +"@graphql-tools/batch-execute@^7.0.0": + version "7.0.0" + resolved "https://registry.yarnpkg.com/@graphql-tools/batch-execute/-/batch-execute-7.0.0.tgz#e79d11bd5b39f29172f6ec2eafa71103c6a6c85b" + integrity sha512-+ywPfK6N2Ddna6oOa5Qb1Mv7EA8LOwRNOAPP9dL37FEhksJM9pYqPSceUcqMqg7S9b0+Cgr78s408rgvurV3/Q== + dependencies: + "@graphql-tools/utils" "^7.0.0" + dataloader "2.0.0" + is-promise "4.0.0" + tslib "~2.0.1" + +"@graphql-tools/code-file-loader@^6.2.4": + version "6.3.1" + resolved "https://registry.yarnpkg.com/@graphql-tools/code-file-loader/-/code-file-loader-6.3.1.tgz#42dfd4db5b968acdb453382f172ec684fa0c34ed" + integrity sha512-ZJimcm2ig+avgsEOWWVvAaxZrXXhiiSZyYYOJi0hk9wh5BxZcLUNKkTp6EFnZE/jmGUwuos3pIjUD3Hwi3Bwhg== + dependencies: + "@graphql-tools/graphql-tag-pluck" "^6.5.1" + "@graphql-tools/utils" "^7.0.0" + tslib "~2.1.0" + +"@graphql-tools/delegate@^6.2.4": + version "6.2.4" + resolved "https://registry.yarnpkg.com/@graphql-tools/delegate/-/delegate-6.2.4.tgz#db553b63eb9512d5eb5bbfdfcd8cb1e2b534699c" + integrity sha512-mXe6DfoWmq49kPcDrpKHgC2DSWcD5q0YCaHHoXYPAOlnLH8VMTY8BxcE8y/Do2eyg+GLcwAcrpffVszWMwqw0w== + dependencies: + "@ardatan/aggregate-error" "0.0.6" + "@graphql-tools/schema" "^6.2.4" + "@graphql-tools/utils" "^6.2.4" + dataloader "2.0.0" + is-promise "4.0.0" + tslib "~2.0.1" + +"@graphql-tools/delegate@^7.0.1", "@graphql-tools/delegate@^7.0.7": + version "7.0.10" + resolved "https://registry.yarnpkg.com/@graphql-tools/delegate/-/delegate-7.0.10.tgz#f87ac85a2dbd03b5b3aabf347f4479fabe8ceac3" + integrity sha512-6Di9ia5ohoDvrHuhj2cak1nJGhIefJmUsd3WKZcJ2nu2yZAFawWMxGvQImqv3N7iyaWKiVhrrK8Roi/JrYhdKg== + dependencies: + "@ardatan/aggregate-error" "0.0.6" + "@graphql-tools/batch-execute" "^7.0.0" + "@graphql-tools/schema" "^7.0.0" + "@graphql-tools/utils" "^7.1.6" + dataloader "2.0.0" + is-promise "4.0.0" + tslib "~2.1.0" + +"@graphql-tools/git-loader@^6.2.4": + version "6.2.6" + resolved "https://registry.yarnpkg.com/@graphql-tools/git-loader/-/git-loader-6.2.6.tgz#c2226f4b8f51f1c05c9ab2649ba32d49c68cd077" + integrity sha512-ooQTt2CaG47vEYPP3CPD+nbA0F+FYQXfzrB1Y1ABN9K3d3O2RK3g8qwslzZaI8VJQthvKwt0A95ZeE4XxteYfw== + dependencies: + "@graphql-tools/graphql-tag-pluck" "^6.2.6" + "@graphql-tools/utils" "^7.0.0" + tslib "~2.1.0" + +"@graphql-tools/github-loader@^6.2.4": + version "6.2.5" + resolved "https://registry.yarnpkg.com/@graphql-tools/github-loader/-/github-loader-6.2.5.tgz#460dff6f5bbaa26957a5ea3be4f452b89cc6a44b" + integrity sha512-DLuQmYeNNdPo8oWus8EePxWCfCAyUXPZ/p1PWqjrX/NGPyH2ZObdqtDAfRHztljt0F/qkBHbGHCEk2TKbRZTRw== + dependencies: + "@graphql-tools/graphql-tag-pluck" "^6.2.6" + "@graphql-tools/utils" "^7.0.0" + cross-fetch "3.0.6" + tslib "~2.0.1" + +"@graphql-tools/graphql-file-loader@^6.2.4": + version "6.2.7" + resolved "https://registry.yarnpkg.com/@graphql-tools/graphql-file-loader/-/graphql-file-loader-6.2.7.tgz#d3720f2c4f4bb90eb2a03a7869a780c61945e143" + integrity 
sha512-5k2SNz0W87tDcymhEMZMkd6/vs6QawDyjQXWtqkuLTBF3vxjxPD1I4dwHoxgWPIjjANhXybvulD7E+St/7s9TQ== + dependencies: + "@graphql-tools/import" "^6.2.6" + "@graphql-tools/utils" "^7.0.0" + tslib "~2.1.0" + +"@graphql-tools/graphql-tag-pluck@^6.2.4", "@graphql-tools/graphql-tag-pluck@^6.2.6", "@graphql-tools/graphql-tag-pluck@^6.5.1": + version "6.5.1" + resolved "https://registry.yarnpkg.com/@graphql-tools/graphql-tag-pluck/-/graphql-tag-pluck-6.5.1.tgz#5fb227dbb1e19f4b037792b50f646f16a2d4c686" + integrity sha512-7qkm82iFmcpb8M6/yRgzjShtW6Qu2OlCSZp8uatA3J0eMl87TxyJoUmL3M3UMMOSundAK8GmoyNVFUrueueV5Q== + dependencies: + "@babel/parser" "7.12.16" + "@babel/traverse" "7.12.13" + "@babel/types" "7.12.13" + "@graphql-tools/utils" "^7.0.0" + tslib "~2.1.0" + +"@graphql-tools/import@^6.2.4", "@graphql-tools/import@^6.2.6": + version "6.3.0" + resolved "https://registry.yarnpkg.com/@graphql-tools/import/-/import-6.3.0.tgz#171472b425ea7cba4a612ad524b96bd206ae71b6" + integrity sha512-zmaVhJ3UPjzJSb005Pjn2iWvH+9AYRXI4IUiTi14uPupiXppJP3s7S25Si3+DbHpFwurDF2nWRxBLiFPWudCqw== + dependencies: + resolve-from "5.0.0" + tslib "~2.1.0" + +"@graphql-tools/json-file-loader@^6.2.4": + version "6.2.6" + resolved "https://registry.yarnpkg.com/@graphql-tools/json-file-loader/-/json-file-loader-6.2.6.tgz#830482cfd3721a0799cbf2fe5b09959d9332739a" + integrity sha512-CnfwBSY5926zyb6fkDBHnlTblHnHI4hoBALFYXnrg0Ev4yWU8B04DZl/pBRUc459VNgO2x8/mxGIZj2hPJG1EA== + dependencies: + "@graphql-tools/utils" "^7.0.0" + tslib "~2.0.1" + +"@graphql-tools/links@^6.2.4": + version "6.2.5" + resolved "https://registry.yarnpkg.com/@graphql-tools/links/-/links-6.2.5.tgz#b172cadc4b7cbe27bfc1dc787651f92517f583bc" + integrity sha512-XeGDioW7F+HK6HHD/zCeF0HRC9s12NfOXAKv1HC0J7D50F4qqMvhdS/OkjzLoBqsgh/Gm8icRc36B5s0rOA9ig== + dependencies: + "@graphql-tools/utils" "^7.0.0" + apollo-link "1.2.14" + apollo-upload-client "14.1.2" + cross-fetch "3.0.6" + form-data "3.0.0" + is-promise "4.0.0" + tslib "~2.0.1" + +"@graphql-tools/load-files@^6.2.4": + version "6.3.0" + resolved "https://registry.yarnpkg.com/@graphql-tools/load-files/-/load-files-6.3.0.tgz#3957e21e14eb078f68fb4ebe84702f1bdc03ca23" + integrity sha512-qDEMz3f5CQz8lIvIhzJVK6Fvd6TMMbhuqded4x5I6zWEetR4AUmwneHWnQkwyIRqDDGgy6VlBw7GToucUkvQag== + dependencies: + globby "11.0.2" + tslib "~2.1.0" + unixify "1.0.0" + +"@graphql-tools/load@^6.2.4": + version "6.2.7" + resolved "https://registry.yarnpkg.com/@graphql-tools/load/-/load-6.2.7.tgz#61f7909d37fb1c095e3e8d4f7a6d3b8bb011e26a" + integrity sha512-b1qWjki1y/QvGtoqW3x8bcwget7xmMfLGsvGFWOB6m38tDbzVT3GlJViAC0nGPDks9OCoJzAdi5IYEkBaqH5GQ== + dependencies: + "@graphql-tools/merge" "^6.2.9" + "@graphql-tools/utils" "^7.5.0" + globby "11.0.2" + import-from "3.0.0" + is-glob "4.0.1" + p-limit "3.1.0" + tslib "~2.1.0" + unixify "1.0.0" + valid-url "1.0.9" + +"@graphql-tools/merge@^6.2.4", "@graphql-tools/merge@^6.2.9": + version "6.2.10" + resolved "https://registry.yarnpkg.com/@graphql-tools/merge/-/merge-6.2.10.tgz#cadb37b1bed786cba1b3c6f728c5476a164e153d" + integrity sha512-dM3n37PcslvhOAkCz7Cwk0BfoiSVKXGmCX+VMZkATbXk/0vlxUfNEpVfA5yF4IkP27F04SzFQSaNrbD0W2Rszw== + dependencies: + "@graphql-tools/schema" "^7.0.0" + "@graphql-tools/utils" "^7.5.0" + tslib "~2.1.0" + +"@graphql-tools/mock@^6.2.4": + version "6.2.4" + resolved "https://registry.yarnpkg.com/@graphql-tools/mock/-/mock-6.2.4.tgz#205323c51f89dd855d345d130c7713d0420909ea" + integrity sha512-O5Zvq/mcDZ7Ptky0IZ4EK9USmxV6FEVYq0Jxv2TI80kvxbCjt0tbEpZ+r1vIt1gZOXlAvadSHYyzWnUPh+1vkQ== + dependencies: + 
"@graphql-tools/schema" "^6.2.4" + "@graphql-tools/utils" "^6.2.4" + tslib "~2.0.1" + +"@graphql-tools/module-loader@^6.2.4": + version "6.2.7" + resolved "https://registry.yarnpkg.com/@graphql-tools/module-loader/-/module-loader-6.2.7.tgz#66ab9468775fac8079ca46ea9896ceea76e4ef69" + integrity sha512-ItAAbHvwfznY9h1H9FwHYDstTcm22Dr5R9GZtrWlpwqj0jaJGcBxsMB9jnK9kFqkbtFYEe4E/NsSnxsS4/vViQ== + dependencies: + "@graphql-tools/utils" "^7.5.0" + tslib "~2.1.0" + +"@graphql-tools/relay-operation-optimizer@^6.2.4": + version "6.3.0" + resolved "https://registry.yarnpkg.com/@graphql-tools/relay-operation-optimizer/-/relay-operation-optimizer-6.3.0.tgz#f8c7f6c8aa4a9cf50ab151fbc5db4f4282a79532" + integrity sha512-Or3UgRvkY9Fq1AAx7q38oPqFmTepLz7kp6wDHKyR0ceG7AvHv5En22R12mAeISInbhff4Rpwgf6cE8zHRu6bCw== + dependencies: + "@graphql-tools/utils" "^7.1.0" + relay-compiler "10.1.0" + tslib "~2.0.1" + +"@graphql-tools/resolvers-composition@^6.2.4": + version "6.2.6" + resolved "https://registry.yarnpkg.com/@graphql-tools/resolvers-composition/-/resolvers-composition-6.2.6.tgz#b369cdf2772a41a7544bf3f16a794501da34c394" + integrity sha512-QO0PC5RG0SolOksupOuB4B0tuzEsQFwQrwD9xLHCrJmjaLi66lOKMFzN40IBY5rqg0k/zqPyjII8rtzcNobvIg== + dependencies: + "@graphql-tools/utils" "^7.0.0" + lodash "4.17.21" + tslib "~2.1.0" + +"@graphql-tools/schema@^6.2.4": + version "6.2.4" + resolved "https://registry.yarnpkg.com/@graphql-tools/schema/-/schema-6.2.4.tgz#cc4e9f5cab0f4ec48500e666719d99fc5042481d" + integrity sha512-rh+14lSY1q8IPbEv2J9x8UBFJ5NrDX9W5asXEUlPp+7vraLp/Tiox4GXdgyA92JhwpYco3nTf5Bo2JDMt1KnAQ== + dependencies: + "@graphql-tools/utils" "^6.2.4" + tslib "~2.0.1" + +"@graphql-tools/schema@^7.0.0", "@graphql-tools/schema@^7.1.2": + version "7.1.3" + resolved "https://registry.yarnpkg.com/@graphql-tools/schema/-/schema-7.1.3.tgz#d816400da51fbac1f0086e35540ab63b5e30e858" + integrity sha512-ZY76hmcJlF1iyg3Im0sQ3ASRkiShjgv102vLTVcH22lEGJeCaCyyS/GF1eUHom418S60bS8Th6+autRUxfBiBg== + dependencies: + "@graphql-tools/utils" "^7.1.2" + tslib "~2.1.0" + +"@graphql-tools/stitch@^6.2.4": + version "6.2.4" + resolved "https://registry.yarnpkg.com/@graphql-tools/stitch/-/stitch-6.2.4.tgz#acfa6a577a33c0f02e4940ffff04753b23b87fd6" + integrity sha512-0C7PNkS7v7iAc001m7c1LPm5FUB0/DYw+s3OyCii6YYYHY8NwdI0roeOyeDGFJkFubWBQfjc3hoSyueKtU73mw== + dependencies: + "@graphql-tools/batch-delegate" "^6.2.4" + "@graphql-tools/delegate" "^6.2.4" + "@graphql-tools/merge" "^6.2.4" + "@graphql-tools/schema" "^6.2.4" + "@graphql-tools/utils" "^6.2.4" + "@graphql-tools/wrap" "^6.2.4" + is-promise "4.0.0" + tslib "~2.0.1" + +"@graphql-tools/url-loader@^6.2.4": + version "6.8.1" + resolved "https://registry.yarnpkg.com/@graphql-tools/url-loader/-/url-loader-6.8.1.tgz#cbfbe20f1a1bdeb9a4704e37b8286026d228920b" + integrity sha512-iE/y9IAu0cZYL7o9IIDdGm5WjxacN25nGgVqjZINYlisW/wyuBxng7DMJBAp6yM6gkxkCpMno1ljA/52MXzVPQ== + dependencies: + "@graphql-tools/delegate" "^7.0.1" + "@graphql-tools/utils" "^7.1.5" + "@graphql-tools/wrap" "^7.0.4" + "@types/websocket" "1.0.1" + cross-fetch "3.0.6" + eventsource "1.0.7" + extract-files "9.0.0" + form-data "4.0.0" + graphql-upload "^11.0.0" + graphql-ws "4.1.5" + is-promise "4.0.0" + isomorphic-ws "4.0.1" + sse-z "0.3.0" + sync-fetch "0.3.0" + tslib "~2.1.0" + valid-url "1.0.9" + ws "7.4.3" + +"@graphql-tools/utils@^6.2.4": + version "6.2.4" + resolved "https://registry.yarnpkg.com/@graphql-tools/utils/-/utils-6.2.4.tgz#38a2314d2e5e229ad4f78cca44e1199e18d55856" + integrity 
sha512-ybgZ9EIJE3JMOtTrTd2VcIpTXtDrn2q6eiYkeYMKRVh3K41+LZa6YnR2zKERTXqTWqhobROwLt4BZbw2O3Aeeg== + dependencies: + "@ardatan/aggregate-error" "0.0.6" + camel-case "4.1.1" + tslib "~2.0.1" + +"@graphql-tools/utils@^7.0.0", "@graphql-tools/utils@^7.1.0", "@graphql-tools/utils@^7.1.2", "@graphql-tools/utils@^7.1.5", "@graphql-tools/utils@^7.1.6", "@graphql-tools/utils@^7.2.1", "@graphql-tools/utils@^7.5.0": + version "7.5.1" + resolved "https://registry.yarnpkg.com/@graphql-tools/utils/-/utils-7.5.1.tgz#1c77ca69ffeb428e8ec51e661413bc6a5594268b" + integrity sha512-FYhSdJrU5cZ8BRuzCVV+YixLx3mXYVzowpKGPfI7re9/WvQPjlyIcjG+hd0C4u/L9Dxx46nBkiqZxZZknE6/lA== + dependencies: + "@ardatan/aggregate-error" "0.0.6" + camel-case "4.1.2" + tslib "~2.1.0" + +"@graphql-tools/wrap@^6.2.4": + version "6.2.4" + resolved "https://registry.yarnpkg.com/@graphql-tools/wrap/-/wrap-6.2.4.tgz#2709817da6e469753735a9fe038c9e99736b2c57" + integrity sha512-cyQgpybolF9DjL2QNOvTS1WDCT/epgYoiA8/8b3nwv5xmMBQ6/6nYnZwityCZ7njb7MMyk7HBEDNNlP9qNJDcA== + dependencies: + "@graphql-tools/delegate" "^6.2.4" + "@graphql-tools/schema" "^6.2.4" + "@graphql-tools/utils" "^6.2.4" + is-promise "4.0.0" + tslib "~2.0.1" + +"@graphql-tools/wrap@^7.0.4": + version "7.0.5" + resolved "https://registry.yarnpkg.com/@graphql-tools/wrap/-/wrap-7.0.5.tgz#8659a119abef11754f712b0c202e41a484951e0b" + integrity sha512-KCWBXsDfvG46GNUawRltJL4j9BMGoOG7oo3WEyCQP+SByWXiTe5cBF45SLDVQgdjljGNZhZ4Lq/7avIkF7/zDQ== + dependencies: + "@graphql-tools/delegate" "^7.0.7" + "@graphql-tools/schema" "^7.1.2" + "@graphql-tools/utils" "^7.2.1" + is-promise "4.0.0" + tslib "~2.0.1" + +"@graphql-typed-document-node/core@^3.0.0": + version "3.1.0" + resolved "https://registry.yarnpkg.com/@graphql-typed-document-node/core/-/core-3.1.0.tgz#0eee6373e11418bfe0b5638f654df7a4ca6a3950" + integrity sha512-wYn6r8zVZyQJ6rQaALBEln5B1pzxb9shV5Ef97kTvn6yVGrqyXVnDqnU24MXnFubR+rZjBY9NWuxX3FB2sTsjg== + +"@gulp-sourcemaps/map-sources@1.X": + version "1.0.0" + resolved "https://registry.yarnpkg.com/@gulp-sourcemaps/map-sources/-/map-sources-1.0.0.tgz#890ae7c5d8c877f6d384860215ace9d7ec945bda" + integrity sha1-iQrnxdjId/bThIYCFazp1+yUW9o= + dependencies: + normalize-path "^2.0.1" + through2 "^2.0.3" + +"@josephg/resolvable@^1.0.0": + version "1.0.1" + resolved "https://registry.yarnpkg.com/@josephg/resolvable/-/resolvable-1.0.1.tgz#69bc4db754d79e1a2f17a650d3466e038d94a5eb" + integrity sha512-CtzORUwWTTOTqfVtHaKRJ0I1kNQd1bpn3sUh8I3nJDVY+5/M/Oe1DnEWzPQvqq/xPIIkzzzIP7mfCoAjFRvDhg== + +"@nodelib/fs.scandir@2.1.4": + version "2.1.4" + resolved "https://registry.yarnpkg.com/@nodelib/fs.scandir/-/fs.scandir-2.1.4.tgz#d4b3549a5db5de2683e0c1071ab4f140904bbf69" + integrity sha512-33g3pMJk3bg5nXbL/+CY6I2eJDzZAni49PfJnL5fghPTggPvBd/pFNSgJsdAgWptuFu7qq/ERvOYFlhvsLTCKA== + dependencies: + "@nodelib/fs.stat" "2.0.4" + run-parallel "^1.1.9" + +"@nodelib/fs.stat@2.0.4", "@nodelib/fs.stat@^2.0.2": + version "2.0.4" + resolved "https://registry.yarnpkg.com/@nodelib/fs.stat/-/fs.stat-2.0.4.tgz#a3f2dd61bab43b8db8fa108a121cfffe4c676655" + integrity sha512-IYlHJA0clt2+Vg7bccq+TzRdJvv19c2INqBSsoOLp1je7xjtr7J26+WXR72MCdvU9q1qTzIWDfhMf+DRvQJK4Q== + +"@nodelib/fs.walk@^1.2.3": + version "1.2.6" + resolved "https://registry.yarnpkg.com/@nodelib/fs.walk/-/fs.walk-1.2.6.tgz#cce9396b30aa5afe9e3756608f5831adcb53d063" + integrity sha512-8Broas6vTtW4GIXTAHDoE32hnN2M5ykgCpWGbuXHQ15vEMqr23pB76e/GZcYsZCHALv50ktd24qhEyKr6wBtow== + dependencies: + "@nodelib/fs.scandir" "2.1.4" + fastq "^1.6.0" + +"@protobufjs/aspromise@^1.1.1", 
"@protobufjs/aspromise@^1.1.2": + version "1.1.2" + resolved "https://registry.yarnpkg.com/@protobufjs/aspromise/-/aspromise-1.1.2.tgz#9b8b0cc663d669a7d8f6f5d0893a14d348f30fbf" + integrity sha1-m4sMxmPWaafY9vXQiToU00jzD78= + +"@protobufjs/base64@^1.1.2": + version "1.1.2" + resolved "https://registry.yarnpkg.com/@protobufjs/base64/-/base64-1.1.2.tgz#4c85730e59b9a1f1f349047dbf24296034bb2735" + integrity sha512-AZkcAA5vnN/v4PDqKyMR5lx7hZttPDgClv83E//FMNhR2TMcLUhfRUBHCmSl0oi9zMgDDqRUJkSxO3wm85+XLg== + +"@protobufjs/codegen@^2.0.4": + version "2.0.4" + resolved "https://registry.yarnpkg.com/@protobufjs/codegen/-/codegen-2.0.4.tgz#7ef37f0d010fb028ad1ad59722e506d9262815cb" + integrity sha512-YyFaikqM5sH0ziFZCN3xDC7zeGaB/d0IUb9CATugHWbd1FRFwWwt4ld4OYMPWu5a3Xe01mGAULCdqhMlPl29Jg== + +"@protobufjs/eventemitter@^1.1.0": + version "1.1.0" + resolved "https://registry.yarnpkg.com/@protobufjs/eventemitter/-/eventemitter-1.1.0.tgz#355cbc98bafad5978f9ed095f397621f1d066b70" + integrity sha1-NVy8mLr61ZePntCV85diHx0Ga3A= + +"@protobufjs/fetch@^1.1.0": + version "1.1.0" + resolved "https://registry.yarnpkg.com/@protobufjs/fetch/-/fetch-1.1.0.tgz#ba99fb598614af65700c1619ff06d454b0d84c45" + integrity sha1-upn7WYYUr2VwDBYZ/wbUVLDYTEU= + dependencies: + "@protobufjs/aspromise" "^1.1.1" + "@protobufjs/inquire" "^1.1.0" + +"@protobufjs/float@^1.0.2": + version "1.0.2" + resolved "https://registry.yarnpkg.com/@protobufjs/float/-/float-1.0.2.tgz#5e9e1abdcb73fc0a7cb8b291df78c8cbd97b87d1" + integrity sha1-Xp4avctz/Ap8uLKR33jIy9l7h9E= + +"@protobufjs/inquire@^1.1.0": + version "1.1.0" + resolved "https://registry.yarnpkg.com/@protobufjs/inquire/-/inquire-1.1.0.tgz#ff200e3e7cf2429e2dcafc1140828e8cc638f089" + integrity sha1-/yAOPnzyQp4tyvwRQIKOjMY48Ik= + +"@protobufjs/path@^1.1.2": + version "1.1.2" + resolved "https://registry.yarnpkg.com/@protobufjs/path/-/path-1.1.2.tgz#6cc2b20c5c9ad6ad0dccfd21ca7673d8d7fbf68d" + integrity sha1-bMKyDFya1q0NzP0hynZz2Nf79o0= + +"@protobufjs/pool@^1.1.0": + version "1.1.0" + resolved "https://registry.yarnpkg.com/@protobufjs/pool/-/pool-1.1.0.tgz#09fd15f2d6d3abfa9b65bc366506d6ad7846ff54" + integrity sha1-Cf0V8tbTq/qbZbw2ZQbWrXhG/1Q= + +"@protobufjs/utf8@^1.1.0": + version "1.1.0" + resolved "https://registry.yarnpkg.com/@protobufjs/utf8/-/utf8-1.1.0.tgz#a777360b5b39a1a2e5106f8e858f2fd2d060c570" + integrity sha1-p3c2C1s5oaLlEG+OhY8v0tBgxXA= + +"@redux-saga/core@^1.0.0": + version "1.1.3" + resolved "https://registry.yarnpkg.com/@redux-saga/core/-/core-1.1.3.tgz#3085097b57a4ea8db5528d58673f20ce0950f6a4" + integrity sha512-8tInBftak8TPzE6X13ABmEtRJGjtK17w7VUs7qV17S8hCO5S3+aUTWZ/DBsBJPdE8Z5jOPwYALyvofgq1Ws+kg== + dependencies: + "@babel/runtime" "^7.6.3" + "@redux-saga/deferred" "^1.1.2" + "@redux-saga/delay-p" "^1.1.2" + "@redux-saga/is" "^1.1.2" + "@redux-saga/symbols" "^1.1.2" + "@redux-saga/types" "^1.1.0" + redux "^4.0.4" + typescript-tuple "^2.2.1" + +"@redux-saga/deferred@^1.1.2": + version "1.1.2" + resolved "https://registry.yarnpkg.com/@redux-saga/deferred/-/deferred-1.1.2.tgz#59937a0eba71fff289f1310233bc518117a71888" + integrity sha512-908rDLHFN2UUzt2jb4uOzj6afpjgJe3MjICaUNO3bvkV/kN/cNeI9PMr8BsFXB/MR8WTAZQq/PlTq8Kww3TBSQ== + +"@redux-saga/delay-p@^1.1.2": + version "1.1.2" + resolved "https://registry.yarnpkg.com/@redux-saga/delay-p/-/delay-p-1.1.2.tgz#8f515f4b009b05b02a37a7c3d0ca9ddc157bb355" + integrity sha512-ojc+1IoC6OP65Ts5+ZHbEYdrohmIw1j9P7HS9MOJezqMYtCDgpkoqB5enAAZrNtnbSL6gVCWPHaoaTY5KeO0/g== + dependencies: + "@redux-saga/symbols" "^1.1.2" + +"@redux-saga/is@^1.1.2": + 
version "1.1.2" + resolved "https://registry.yarnpkg.com/@redux-saga/is/-/is-1.1.2.tgz#ae6c8421f58fcba80faf7cadb7d65b303b97e58e" + integrity sha512-OLbunKVsCVNTKEf2cH4TYyNbbPgvmZ52iaxBD4I1fTif4+MTXMa4/Z07L83zW/hTCXwpSZvXogqMqLfex2Tg6w== + dependencies: + "@redux-saga/symbols" "^1.1.2" + "@redux-saga/types" "^1.1.0" + +"@redux-saga/symbols@^1.1.2": + version "1.1.2" + resolved "https://registry.yarnpkg.com/@redux-saga/symbols/-/symbols-1.1.2.tgz#216a672a487fc256872b8034835afc22a2d0595d" + integrity sha512-EfdGnF423glv3uMwLsGAtE6bg+R9MdqlHEzExnfagXPrIiuxwr3bdiAwz3gi+PsrQ3yBlaBpfGLtDG8rf3LgQQ== + +"@redux-saga/types@^1.1.0": + version "1.1.0" + resolved "https://registry.yarnpkg.com/@redux-saga/types/-/types-1.1.0.tgz#0e81ce56b4883b4b2a3001ebe1ab298b84237204" + integrity sha512-afmTuJrylUU/0OtqzaRkbyYFFNgCF73Bvel/sw90pvGrWIZ+vyoIJqA6eMSoA6+nb443kTmulmBtC9NerXboNg== + +"@sindresorhus/is@^0.14.0": + version "0.14.0" + resolved "https://registry.yarnpkg.com/@sindresorhus/is/-/is-0.14.0.tgz#9fb3a3cf3132328151f353de4632e01e52102bea" + integrity sha512-9NET910DNaIPngYnLLPeg+Ogzqsi9uM4mSboU5y6p8S5DzMTVEsJZrawi+BoDNUVBa2DhJqQYUFvMDfgU062LQ== + +"@szmarczak/http-timer@^1.1.2": + version "1.1.2" + resolved "https://registry.yarnpkg.com/@szmarczak/http-timer/-/http-timer-1.1.2.tgz#b1665e2c461a2cd92f4c1bbf50d5454de0d4b421" + integrity sha512-XIB2XbzHTN6ieIjfIMV9hlVcfPU26s2vafYWQcZHWXHOxiaRZYEDKEwdl129Zyg50+foYV2jCgtrqSA6qNuNSA== + dependencies: + defer-to-connect "^1.0.1" + +"@truffle/abi-utils@^0.1.4", "@truffle/abi-utils@^0.1.5": + version "0.1.5" + resolved "https://registry.yarnpkg.com/@truffle/abi-utils/-/abi-utils-0.1.5.tgz#95b39ee0cb6baf777fdbaa2ac6d901ab8b0f8c58" + integrity sha512-PvCN/qebM0boK2YycX3sMe6CwoLtB7cpYj2ugHPtcQ+Zpg1hQRGS+GRLeBuQg3RR5X8IxzLb4YPZh5dnJxMZYA== + dependencies: + change-case "3.0.2" + faker "^5.3.1" + fast-check "^2.12.1" + source-map-support "^0.5.19" + +"@truffle/blockchain-utils@^0.0.26": + version "0.0.26" + resolved "https://registry.yarnpkg.com/@truffle/blockchain-utils/-/blockchain-utils-0.0.26.tgz#f4ea794e0a18c74d73ea10e29a506c9ed0a503ee" + integrity sha512-M91NJkfapK1RqdzVwKSSenPEE2cHzAAFwC3aPhA8Y3DznRfzOcck4mDH6eY71sytVCrGaXGm/Wirn3drGSH+qQ== + dependencies: + source-map-support "^0.5.19" + +"@truffle/code-utils@^1.2.23", "@truffle/code-utils@^1.2.24": + version "1.2.24" + resolved "https://registry.yarnpkg.com/@truffle/code-utils/-/code-utils-1.2.24.tgz#8da82510e416128c45fc154e92410982ab98b426" + integrity sha512-IqpbTh4uNQueadv96GBWBaGTYTyOsLKE9Dui1wpiijON6xq2iIcTArej1vMh+nkAd5/AsP+enbBY8mksm6rFBg== + dependencies: + cbor "^5.1.0" + source-map-support "^0.5.19" + +"@truffle/codec@^0.10.1": + version "0.10.1" + resolved "https://registry.yarnpkg.com/@truffle/codec/-/codec-0.10.1.tgz#70df52ddf1c64781a23daaccda24e10bfb9dec9d" + integrity sha512-c1lC9Wcp+Z1DLvEYH3dkEtMKnUJx72CirO3kmi0OgFSA5QqTDCtfrVOhAugcb/iMLgqUK05/pexp2whb4oASKA== + dependencies: + big.js "^5.2.2" + bn.js "^5.1.3" + cbor "^5.1.0" + debug "^4.3.1" + lodash.clonedeep "^4.5.0" + lodash.escaperegexp "^4.1.2" + lodash.partition "^4.6.0" + lodash.sum "^4.0.2" + semver "^7.3.4" + source-map-support "^0.5.19" + utf8 "^3.0.0" + web3-utils "1.2.9" + +"@truffle/config@^1.2.35": + version "1.2.35" + resolved "https://registry.yarnpkg.com/@truffle/config/-/config-1.2.35.tgz#98a9ae3a964e73c33dcea4dcb172f878fdbb9bdd" + integrity sha512-ULTS9t3ldqEV1VBVNWlS9tdWJ0r637ANspzBoQd6S/Ab7CfueQhcIfp29oz6Ahcgjkl4NX+Gu/dG6/Jiys81vg== + dependencies: + "@truffle/error" "^0.0.12" + "@truffle/events" "^0.0.9" + 
"@truffle/provider" "^0.2.26" + configstore "^4.0.0" + find-up "^2.1.0" + lodash.assignin "^4.2.0" + lodash.merge "^4.6.2" + module "^1.2.5" + original-require "^1.0.1" + source-map-support "^0.5.19" + +"@truffle/contract-schema@^3.3.4": + version "3.3.4" + resolved "https://registry.yarnpkg.com/@truffle/contract-schema/-/contract-schema-3.3.4.tgz#95f0265cac7de7bcaa0542f5fe671a7896011bfe" + integrity sha512-HzscBl/GhZBvPNQeD9l6ewSHSkvNmE+bA0iTVa0Y2mNf5GD5Y3fK2NPyfbOdtckOvLqebvYGEDEPRiXc3BZ05g== + dependencies: + ajv "^6.10.0" + crypto-js "^3.1.9-1" + debug "^4.3.1" + +"@truffle/contract@^4.3": + version "4.3.9" + resolved "https://registry.yarnpkg.com/@truffle/contract/-/contract-4.3.9.tgz#caf515df359e72f207edc6f1d4e7b8bca88566a7" + integrity sha512-yd6nejsKEReJrPjOdRHkypfsMr337yc43qxu5b4TF2JAf2Kz7ZAWasHhY3j3xRwra3AqNOm4p3njkq8T+mKytg== + dependencies: + "@truffle/blockchain-utils" "^0.0.26" + "@truffle/contract-schema" "^3.3.4" + "@truffle/debug-utils" "^5.0.11" + "@truffle/error" "^0.0.12" + "@truffle/interface-adapter" "^0.4.19" + bignumber.js "^7.2.1" + ethereum-ens "^0.8.0" + ethers "^4.0.32" + source-map-support "^0.5.19" + web3 "1.2.9" + web3-core-helpers "1.2.9" + web3-core-promievent "1.2.9" + web3-eth-abi "1.2.9" + web3-utils "1.2.9" + +"@truffle/db@^0.5.3": + version "0.5.3" + resolved "https://registry.yarnpkg.com/@truffle/db/-/db-0.5.3.tgz#1223ee5c9b9f112abf5883f647d46ae1e45d5dbd" + integrity sha512-cNQJgcqC77xAIvFrS9R1XHmppOnlZmVZvcEqHOv0PGzcf0XA+hUkUgiOHFYn5bwTvGxLMrBlBmAnBprKlJYsRg== + dependencies: + "@truffle/abi-utils" "^0.1.4" + "@truffle/code-utils" "^1.2.23" + "@truffle/config" "^1.2.35" + apollo-server "^2.18.2" + debug "^4.3.1" + fs-extra "^9.1.0" + graphql "^15.3.0" + graphql-tag "^2.11.0" + graphql-tools "^6.2.4" + json-stable-stringify "^1.0.1" + jsondown "^1.0.0" + pascal-case "^2.0.1" + pluralize "^8.0.0" + pouchdb "7.1.1" + pouchdb-adapter-memory "^7.1.1" + pouchdb-adapter-node-websql "^7.0.0" + pouchdb-debug "^7.1.1" + pouchdb-find "^7.0.0" + source-map-support "^0.5.19" + web3-utils "1.2.9" + +"@truffle/debug-utils@^5.0.11": + version "5.0.11" + resolved "https://registry.yarnpkg.com/@truffle/debug-utils/-/debug-utils-5.0.11.tgz#297ff83943212bf593a641180e3b28b230acadaa" + integrity sha512-KurW9r1DcK9c7/I0H21YWGBKu77gWm5HfBW6T+MjuRh5FGpxZ7GPka8oQkJCAZQuZKaQc9r9BoCQYQx1NX8pIg== + dependencies: + "@truffle/codec" "^0.10.1" + "@trufflesuite/chromafi" "^2.2.2" + bn.js "^5.1.3" + chalk "^2.4.2" + debug "^4.3.1" + highlight.js "^10.4.0" + highlightjs-solidity "^1.0.21" + +"@truffle/debugger@^8.0.17": + version "8.0.17" + resolved "https://registry.yarnpkg.com/@truffle/debugger/-/debugger-8.0.17.tgz#a13cd3c967bf045e71a00bd711fb371effa06752" + integrity sha512-CpxsW3edO0gPygLUmIkhFC4hgONltYuhJIM8jhdYL4KpDe8hRlFjWeiveH++iJX/1ka1A2Wbyk9G/TtCdiLY4Q== + dependencies: + "@truffle/abi-utils" "^0.1.5" + "@truffle/codec" "^0.10.1" + "@truffle/source-map-utils" "^1.3.35" + bn.js "^5.1.3" + debug "^4.3.1" + json-pointer "^0.6.0" + json-stable-stringify "^1.0.1" + lodash.flatten "^4.4.0" + lodash.merge "^4.6.2" + lodash.sum "^4.0.2" + lodash.zipwith "^4.2.0" + redux "^3.7.2" + redux-cli-logger "^2.0.1" + redux-saga "1.0.0" + remote-redux-devtools "^0.5.12" + reselect-tree "^1.3.4" + semver "^7.3.4" + source-map-support "^0.5.19" + web3 "1.2.9" + web3-eth-abi "1.2.9" + +"@truffle/error@^0.0.12": + version "0.0.12" + resolved "https://registry.yarnpkg.com/@truffle/error/-/error-0.0.12.tgz#83e02e6ffe1d154fe274141d90038a91fd1e186d" + integrity 
sha512-kZqqnPR9YDJG7KCDOcN1qH16Qs0oz1PzF0Y93AWdhXuL9S9HYo/RUUeqGKbPpRBEZldQUS8aa4EzfK08u5pu6g== + +"@truffle/events@^0.0.9": + version "0.0.9" + resolved "https://registry.yarnpkg.com/@truffle/events/-/events-0.0.9.tgz#460fc72a04269526cbd8ef54069d474c22b42b23" + integrity sha512-o0rS8zkjCzg2vDJymSZyyq1eKdkRbxIFnsnYQl6Bc2StK89C/ZISenxrUe2fbdeq3L9Zq+ds1mSKH/MFK0Ejkg== + dependencies: + emittery "^0.4.1" + ora "^3.4.0" + +"@truffle/hdwallet-provider@^1.2": + version "1.2.2" + resolved "https://registry.yarnpkg.com/@truffle/hdwallet-provider/-/hdwallet-provider-1.2.2.tgz#7b42f7cb7fc1f80751c573c72ba488e59690f8ea" + integrity sha512-gpE5M9c+G7uMR9Nn2xslY0BRdl8hvlrHxBJ451g/V3WnOI5rDQMXezz6VZMn3zvWDiQTPRknx1uUDfWvMuQwqg== + dependencies: + "@trufflesuite/web3-provider-engine" "15.0.13-1" + any-promise "^1.3.0" + bindings "^1.5.0" + ethereum-cryptography "^0.1.3" + ethereum-protocol "^1.0.1" + ethereumjs-tx "^1.0.0" + ethereumjs-util "^6.1.0" + ethereumjs-wallet "^1.0.1" + source-map-support "^0.5.19" + +"@truffle/interface-adapter@^0.4.19": + version "0.4.19" + resolved "https://registry.yarnpkg.com/@truffle/interface-adapter/-/interface-adapter-0.4.19.tgz#19248ac88099f8df34f58a3d43a95ba3470dc89a" + integrity sha512-+Zz6Fr8+I2wYSS8RM3WBOMzf22QffMQTnlsYsRgRHzv3gYoRA9ZDLb84lFRfmWyw+IdXTo90tjRHEb5krC6uxg== + dependencies: + bn.js "^5.1.3" + ethers "^4.0.32" + source-map-support "^0.5.19" + web3 "1.2.9" + +"@truffle/provider@^0.2.26": + version "0.2.26" + resolved "https://registry.yarnpkg.com/@truffle/provider/-/provider-0.2.26.tgz#88e31b79973c2427c4a17d9a59411e6fbc810190" + integrity sha512-YKPmhB9S9AQkT2ePGtadwjDduxU23DXXy+5zyM5fevw5GCbXSnf+jG6rICXjPkVFjuKBlXuq5JbuERZn43522Q== + dependencies: + "@truffle/error" "^0.0.12" + "@truffle/interface-adapter" "^0.4.19" + web3 "1.2.9" + +"@truffle/source-map-utils@^1.3.35": + version "1.3.35" + resolved "https://registry.yarnpkg.com/@truffle/source-map-utils/-/source-map-utils-1.3.35.tgz#aa40422a05e2727254665ee2c23659d01230eb8f" + integrity sha512-j3PHac4g/yQwxSB899lkal/YMuIXLNNlDGfCog2QrWqdtK7HJhx6X2tftwqrZzO4JTKc1Cs8KOCPOndx9W2xeQ== + dependencies: + "@truffle/code-utils" "^1.2.24" + "@truffle/codec" "^0.10.1" + debug "^4.3.1" + json-pointer "^0.6.0" + node-interval-tree "^1.3.3" + web3-utils "1.2.9" + +"@trufflesuite/chromafi@^2.2.2": + version "2.2.2" + resolved "https://registry.yarnpkg.com/@trufflesuite/chromafi/-/chromafi-2.2.2.tgz#d3fc507aa8504faffc50fb892cedcfe98ff57f77" + integrity sha512-mItQwVBsb8qP/vaYHQ1kDt2vJLhjoEXJptT6y6fJGvFophMFhOI/NsTVUa0nJL1nyMeFiS6hSYuNVdpQZzB1gA== + dependencies: + ansi-mark "^1.0.0" + ansi-regex "^3.0.0" + array-uniq "^1.0.3" + camelcase "^4.1.0" + chalk "^2.3.2" + cheerio "^1.0.0-rc.2" + detect-indent "^5.0.0" + he "^1.1.1" + highlight.js "^10.4.1" + lodash.merge "^4.6.2" + min-indent "^1.0.0" + strip-ansi "^4.0.0" + strip-indent "^2.0.0" + super-split "^1.1.0" + +"@trufflesuite/eth-json-rpc-filters@^4.1.2-1": + version "4.1.2-1" + resolved "https://registry.yarnpkg.com/@trufflesuite/eth-json-rpc-filters/-/eth-json-rpc-filters-4.1.2-1.tgz#61ab78c52e98a883e5cf086925b34a30297b1824" + integrity sha512-/MChvC5dw2ck9NU1cZmdovCz2VKbOeIyR4tcxDvA5sT+NaL0rA2/R5U0yI7zsbo1zD+pgqav77rQHTzpUdDNJQ== + dependencies: + "@trufflesuite/eth-json-rpc-middleware" "^4.4.2-0" + await-semaphore "^0.1.3" + eth-query "^2.1.2" + json-rpc-engine "^5.1.3" + lodash.flatmap "^4.5.0" + safe-event-emitter "^1.0.1" + +"@trufflesuite/eth-json-rpc-infura@^4.0.3-0": + version "4.0.3-0" + resolved 
"https://registry.yarnpkg.com/@trufflesuite/eth-json-rpc-infura/-/eth-json-rpc-infura-4.0.3-0.tgz#6d22122937cf60ec9d21a02351c101fdc608c4fe" + integrity sha512-xaUanOmo0YLqRsL0SfXpFienhdw5bpQ1WEXxMTRi57az4lwpZBv4tFUDvcerdwJrxX9wQqNmgUgd1BrR01dumw== + dependencies: + "@trufflesuite/eth-json-rpc-middleware" "^4.4.2-1" + cross-fetch "^2.1.1" + eth-json-rpc-errors "^1.0.1" + json-rpc-engine "^5.1.3" + +"@trufflesuite/eth-json-rpc-middleware@^4.4.2-0", "@trufflesuite/eth-json-rpc-middleware@^4.4.2-1": + version "4.4.2-1" + resolved "https://registry.yarnpkg.com/@trufflesuite/eth-json-rpc-middleware/-/eth-json-rpc-middleware-4.4.2-1.tgz#8c3638ed8a7ed89a1e5e71407de068a65bef0df2" + integrity sha512-iEy9H8ja7/8aYES5HfrepGBKU9n/Y4OabBJEklVd/zIBlhCCBAWBqkIZgXt11nBXO/rYAeKwYuE3puH3ByYnLA== + dependencies: + "@trufflesuite/eth-sig-util" "^1.4.2" + btoa "^1.2.1" + clone "^2.1.1" + eth-json-rpc-errors "^1.0.1" + eth-query "^2.1.2" + ethereumjs-block "^1.6.0" + ethereumjs-tx "^1.3.7" + ethereumjs-util "^5.1.2" + ethereumjs-vm "^2.6.0" + fetch-ponyfill "^4.0.0" + json-rpc-engine "^5.1.3" + json-stable-stringify "^1.0.1" + pify "^3.0.0" + safe-event-emitter "^1.0.1" + +"@trufflesuite/eth-sig-util@^1.4.2": + version "1.4.2" + resolved "https://registry.yarnpkg.com/@trufflesuite/eth-sig-util/-/eth-sig-util-1.4.2.tgz#b529e2f38ac08e652116f48981132a26242a4f08" + integrity sha512-+GyfN6b0LNW77hbQlH3ufZ/1eCON7mMrGym6tdYf7xiNw9Vv3jBO72bmmos1EId2NgBvPMhmYYm6DSLQFTmzrA== + dependencies: + ethereumjs-abi "^0.6.8" + ethereumjs-util "^5.1.1" + +"@trufflesuite/web3-provider-engine@15.0.13-1": + version "15.0.13-1" + resolved "https://registry.yarnpkg.com/@trufflesuite/web3-provider-engine/-/web3-provider-engine-15.0.13-1.tgz#f6a7f7131a2fdc4ab53976318ed13ce83e8e4bcb" + integrity sha512-6u3x/iIN5fyj8pib5QTUDmIOUiwAGhaqdSTXdqCu6v9zo2BEwdCqgEJd1uXDh3DBmPRDfiZ/ge8oUPy7LerpHg== + dependencies: + "@trufflesuite/eth-json-rpc-filters" "^4.1.2-1" + "@trufflesuite/eth-json-rpc-infura" "^4.0.3-0" + "@trufflesuite/eth-json-rpc-middleware" "^4.4.2-1" + "@trufflesuite/eth-sig-util" "^1.4.2" + async "^2.5.0" + backoff "^2.5.0" + clone "^2.0.0" + cross-fetch "^2.1.0" + eth-block-tracker "^4.4.2" + eth-json-rpc-errors "^2.0.2" + ethereumjs-block "^1.2.2" + ethereumjs-tx "^1.2.0" + ethereumjs-util "^5.1.5" + ethereumjs-vm "^2.3.4" + json-stable-stringify "^1.0.1" + promise-to-callback "^1.0.0" + readable-stream "^2.2.9" + request "^2.85.0" + semaphore "^1.0.3" + ws "^5.1.1" + xhr "^2.2.0" + xtend "^4.0.1" + +"@types/accepts@*", "@types/accepts@^1.3.5": + version "1.3.5" + resolved "https://registry.yarnpkg.com/@types/accepts/-/accepts-1.3.5.tgz#c34bec115cfc746e04fe5a059df4ce7e7b391575" + integrity sha512-jOdnI/3qTpHABjM5cx1Hc0sKsPoYCp+DP/GJRGtDlPd7fiV9oXGGIcjW/ZOxLIvjGz8MA+uMZI9metHlgqbgwQ== + dependencies: + "@types/node" "*" + +"@types/bn.js@^4.11.3", "@types/bn.js@^4.11.4", "@types/bn.js@^4.11.5": + version "4.11.6" + resolved "https://registry.yarnpkg.com/@types/bn.js/-/bn.js-4.11.6.tgz#c306c70d9358aaea33cd4eda092a742b9505967c" + integrity sha512-pqr857jrp2kPuO9uRjZ3PwnJTjoQy+fcdxvBTvHm6dkmEL9q+hDD/2j/0ELOBPtPnS8LjCX0gI9nbl8lVkadpg== + dependencies: + "@types/node" "*" + +"@types/bn.js@^5.1.0": + version "5.1.0" + resolved "https://registry.yarnpkg.com/@types/bn.js/-/bn.js-5.1.0.tgz#32c5d271503a12653c62cf4d2b45e6eab8cebc68" + integrity sha512-QSSVYj7pYFN49kW77o2s9xTCwZ8F2xLbjLLSEVh8D2F4JUhZtPAGOFLTD+ffqksBx/u4cE/KImFjyhqCjn/LIA== + dependencies: + "@types/node" "*" + +"@types/body-parser@*": + version "1.19.1" + resolved 
"https://registry.yarnpkg.com/@types/body-parser/-/body-parser-1.19.1.tgz#0c0174c42a7d017b818303d4b5d969cb0b75929c" + integrity sha512-a6bTJ21vFOGIkwM0kzh9Yr89ziVxq4vYH2fQ6N8AeipEzai/cFK6aGMArIkUeIdRIgpwQa+2bXiLuUJCpSf2Cg== + dependencies: + "@types/connect" "*" + "@types/node" "*" + +"@types/body-parser@1.19.0": + version "1.19.0" + resolved "https://registry.yarnpkg.com/@types/body-parser/-/body-parser-1.19.0.tgz#0685b3c47eb3006ffed117cdd55164b61f80538f" + integrity sha512-W98JrE0j2K78swW4ukqMleo8R7h/pFETjM2DQ90MF6XK2i4LO4W3gQ71Lt4w3bfm2EvVSyWHplECvB5sK22yFQ== + dependencies: + "@types/connect" "*" + "@types/node" "*" + +"@types/concat-stream@^1.6.0": + version "1.6.1" + resolved "https://registry.yarnpkg.com/@types/concat-stream/-/concat-stream-1.6.1.tgz#24bcfc101ecf68e886aaedce60dfd74b632a1b74" + integrity sha512-eHE4cQPoj6ngxBZMvVf6Hw7Mh4jMW4U9lpGmS5GBPB9RYxlFg+CHaVN7ErNY4W9XfLIEn20b4VDYaIrbq0q4uA== + dependencies: + "@types/node" "*" + +"@types/connect@*", "@types/connect@^3.4.33": + version "3.4.35" + resolved "https://registry.yarnpkg.com/@types/connect/-/connect-3.4.35.tgz#5fcf6ae445e4021d1fc2219a4873cc73a3bb2ad1" + integrity sha512-cdeYyv4KWoEgpBISTxWvqYsVy444DOqehiF3fM3ne10AmJ62RSyNkUnxMJXHQWRQQX2eR94m5y1IZyDwBjV9FQ== + dependencies: + "@types/node" "*" + +"@types/content-disposition@*": + version "0.5.4" + resolved "https://registry.yarnpkg.com/@types/content-disposition/-/content-disposition-0.5.4.tgz#de48cf01c79c9f1560bcfd8ae43217ab028657f8" + integrity sha512-0mPF08jn9zYI0n0Q/Pnz7C4kThdSt+6LD4amsrYDDpgBfrVWa3TcCOxKX1zkGgYniGagRv8heN2cbh+CAn+uuQ== + +"@types/cookies@*": + version "0.7.7" + resolved "https://registry.yarnpkg.com/@types/cookies/-/cookies-0.7.7.tgz#7a92453d1d16389c05a5301eef566f34946cfd81" + integrity sha512-h7BcvPUogWbKCzBR2lY4oqaZbO3jXZksexYJVFvkrFeLgbZjQkU4x8pRq6eg2MHXQhY0McQdqmmsxRWlVAHooA== + dependencies: + "@types/connect" "*" + "@types/express" "*" + "@types/keygrip" "*" + "@types/node" "*" + +"@types/cors@2.8.10": + version "2.8.10" + resolved "https://registry.yarnpkg.com/@types/cors/-/cors-2.8.10.tgz#61cc8469849e5bcdd0c7044122265c39cec10cf4" + integrity sha512-C7srjHiVG3Ey1nR6d511dtDkCEjxuN9W1HWAEjGq8kpcwmNM6JJkpC0xvabM7BXTG2wDq8Eu33iH9aQKa7IvLQ== + +"@types/express-serve-static-core@^4.17.18", "@types/express-serve-static-core@^4.17.21": + version "4.17.24" + resolved "https://registry.yarnpkg.com/@types/express-serve-static-core/-/express-serve-static-core-4.17.24.tgz#ea41f93bf7e0d59cd5a76665068ed6aab6815c07" + integrity sha512-3UJuW+Qxhzwjq3xhwXm2onQcFHn76frIYVbTu+kn24LFxI+dEhdfISDFovPB8VpEgW8oQCTpRuCe+0zJxB7NEA== + dependencies: + "@types/node" "*" + "@types/qs" "*" + "@types/range-parser" "*" + +"@types/express-serve-static-core@^4.17.9": + version "4.17.30" + resolved "https://registry.yarnpkg.com/@types/express-serve-static-core/-/express-serve-static-core-4.17.30.tgz#0f2f99617fa8f9696170c46152ccf7500b34ac04" + integrity sha512-gstzbTWro2/nFed1WXtf+TtrpwxH7Ggs4RLYTLbeVgIkUQOI3WG/JKjgeOU1zXDvezllupjrf8OPIdvTbIaVOQ== + dependencies: + "@types/node" "*" + "@types/qs" "*" + "@types/range-parser" "*" + +"@types/express@*", "@types/express@^4.17.12": + version "4.17.13" + resolved "https://registry.yarnpkg.com/@types/express/-/express-4.17.13.tgz#a76e2995728999bab51a33fabce1d705a3709034" + integrity sha512-6bSZTPaTIACxn48l50SR+axgrqm6qXFIxrdAKaG6PaJk3+zuUr35hBlgT7vOmJcum+OEaIBLtHV/qloEAFITeA== + dependencies: + "@types/body-parser" "*" + "@types/express-serve-static-core" "^4.17.18" + "@types/qs" "*" + "@types/serve-static" "*" + 
+"@types/form-data@0.0.33": + version "0.0.33" + resolved "https://registry.yarnpkg.com/@types/form-data/-/form-data-0.0.33.tgz#c9ac85b2a5fd18435b8c85d9ecb50e6d6c893ff8" + integrity sha512-8BSvG1kGm83cyJITQMZSulnl6QV8jqAGreJsc5tPu1Jq0vTSOiY/k24Wx82JRpWwZSqrala6sd5rWi6aNXvqcw== + dependencies: + "@types/node" "*" + +"@types/fs-capacitor@*": + version "2.0.0" + resolved "https://registry.yarnpkg.com/@types/fs-capacitor/-/fs-capacitor-2.0.0.tgz#17113e25817f584f58100fb7a08eed288b81956e" + integrity sha512-FKVPOCFbhCvZxpVAMhdBdTfVfXUpsh15wFHgqOKxh9N9vzWZVuWCSijZ5T4U34XYNnuj2oduh6xcs1i+LPI+BQ== + dependencies: + "@types/node" "*" + +"@types/http-assert@*": + version "1.5.3" + resolved "https://registry.yarnpkg.com/@types/http-assert/-/http-assert-1.5.3.tgz#ef8e3d1a8d46c387f04ab0f2e8ab8cb0c5078661" + integrity sha512-FyAOrDuQmBi8/or3ns4rwPno7/9tJTijVW6aQQjK02+kOQ8zmoNg2XJtAuQhvQcy1ASJq38wirX5//9J1EqoUA== + +"@types/http-errors@*": + version "1.8.1" + resolved "https://registry.yarnpkg.com/@types/http-errors/-/http-errors-1.8.1.tgz#e81ad28a60bee0328c6d2384e029aec626f1ae67" + integrity sha512-e+2rjEwK6KDaNOm5Aa9wNGgyS9oSZU/4pfSMMPYNOfjvFI0WVXm29+ITRFr6aKDvvKo7uU1jV68MW4ScsfDi7Q== + +"@types/keygrip@*": + version "1.0.2" + resolved "https://registry.yarnpkg.com/@types/keygrip/-/keygrip-1.0.2.tgz#513abfd256d7ad0bf1ee1873606317b33b1b2a72" + integrity sha512-GJhpTepz2udxGexqos8wgaBx4I/zWIDPh/KOGEwAqtuGDkOUJu5eFvwmdBX4AmB8Odsr+9pHCQqiAqDL/yKMKw== + +"@types/koa-compose@*": + version "3.2.5" + resolved "https://registry.yarnpkg.com/@types/koa-compose/-/koa-compose-3.2.5.tgz#85eb2e80ac50be95f37ccf8c407c09bbe3468e9d" + integrity sha512-B8nG/OoE1ORZqCkBVsup/AKcvjdgoHnfi4pZMn5UwAPCbhk/96xyv284eBYW8JlQbQ7zDmnpFr68I/40mFoIBQ== + dependencies: + "@types/koa" "*" + +"@types/koa@*": + version "2.13.4" + resolved "https://registry.yarnpkg.com/@types/koa/-/koa-2.13.4.tgz#10620b3f24a8027ef5cbae88b393d1b31205726b" + integrity sha512-dfHYMfU+z/vKtQB7NUrthdAEiSvnLebvBjwHtfFmpZmB7em2N3WVQdHgnFq+xvyVgxW5jKDmjWfLD3lw4g4uTw== + dependencies: + "@types/accepts" "*" + "@types/content-disposition" "*" + "@types/cookies" "*" + "@types/http-assert" "*" + "@types/http-errors" "*" + "@types/keygrip" "*" + "@types/koa-compose" "*" + "@types/node" "*" + +"@types/lodash@^4.14.159": + version "4.14.184" + resolved "https://registry.yarnpkg.com/@types/lodash/-/lodash-4.14.184.tgz#23f96cd2a21a28e106dc24d825d4aa966de7a9fe" + integrity sha512-RoZphVtHbxPZizt4IcILciSWiC6dcn+eZ8oX9IWEYfDMcocdd42f7NPI6fQj+6zI8y4E0L7gu2pcZKLGTRaV9Q== + +"@types/long@^4.0.0": + version "4.0.1" + resolved "https://registry.yarnpkg.com/@types/long/-/long-4.0.1.tgz#459c65fa1867dafe6a8f322c4c51695663cc55e9" + integrity sha512-5tXH6Bx/kNGd3MgffdmP4dy2Z+G4eaXw0SE81Tq3BNadtnMR5/ySMzX4SLEzHJzSmPNn4HIdpQsBvXMUykr58w== + +"@types/mime@^1": + version "1.3.2" + resolved "https://registry.yarnpkg.com/@types/mime/-/mime-1.3.2.tgz#93e25bf9ee75fe0fd80b594bc4feb0e862111b5a" + integrity sha512-YATxVxgRqNH6nHEIsvg6k2Boc1JHI9ZbH5iWFFv/MTkchz3b1ieGDa5T0a9RznNdI0KhVbdbWSN+KWWrQZRxTw== + +"@types/node@*": + version "18.7.11" + resolved "https://registry.yarnpkg.com/@types/node/-/node-18.7.11.tgz#486e72cfccde88da24e1f23ff1b7d8bfb64e6250" + integrity sha512-KZhFpSLlmK/sdocfSAjqPETTMd0ug6HIMIAwkwUpU79olnZdQtMxpQP+G1wDzCH7na+FltSIhbaZuKdwZ8RDrw== + +"@types/node@^10.0.3", "@types/node@^10.1.0": + version "10.17.60" + resolved "https://registry.yarnpkg.com/@types/node/-/node-10.17.60.tgz#35f3d6213daed95da7f0f73e75bcc6980e90597b" + integrity 
sha512-F0KIgDJfy2nA3zMLmWGKxcH2ZVEtCZXHHdOQs2gSaQ27+lNeEfGxzkIw90aXswATX7AZ33tahPbzy6KAfUreVw== + +"@types/node@^10.12.18": + version "10.17.55" + resolved "https://registry.yarnpkg.com/@types/node/-/node-10.17.55.tgz#a147f282edec679b894d4694edb5abeb595fecbd" + integrity sha512-koZJ89uLZufDvToeWO5BrC4CR4OUfHnUz2qoPs/daQH6qq3IN62QFxCTZ+bKaCE0xaoCAJYE4AXre8AbghCrhg== + +"@types/node@^12.12.54": + version "12.20.55" + resolved "https://registry.yarnpkg.com/@types/node/-/node-12.20.55.tgz#c329cbd434c42164f846b909bd6f85b5537f6240" + integrity sha512-J8xLz7q2OFulZ2cyGTLE1TbbZcjpno7FaN6zdJNrgAdrJ+DZzh/uFR6YrTb4C+nXakvud8Q4+rbhoIWlYQbUFQ== + +"@types/node@^12.12.6", "@types/node@^12.6.1": + version "12.20.4" + resolved "https://registry.yarnpkg.com/@types/node/-/node-12.20.4.tgz#73687043dd00fcb6962c60fbf499553a24d6bdf2" + integrity sha512-xRCgeE0Q4pT5UZ189TJ3SpYuX/QGl6QIAOAIeDSbAVAd2gX1NxSZup4jNVK7cxIeP8KDSbJgcckun495isP1jQ== + +"@types/node@^8.0.0": + version "8.10.66" + resolved "https://registry.yarnpkg.com/@types/node/-/node-8.10.66.tgz#dd035d409df322acc83dff62a602f12a5783bbb3" + integrity sha512-tktOkFUA4kXx2hhhrB8bIFb5TbwzS4uOhKEmwiD+NoiL0qtP2OQ9mFldbgD4dV1djrlBYP6eBuQZiWjuHUpqFw== + +"@types/parse-json@^4.0.0": + version "4.0.0" + resolved "https://registry.yarnpkg.com/@types/parse-json/-/parse-json-4.0.0.tgz#2f8bb441434d163b35fb8ffdccd7138927ffb8c0" + integrity sha512-//oorEZjL6sbPcKUaCdIGlIUeH26mgzimjBB77G6XRgnDl/L5wOnpyBGRe/Mmf5CVW3PwEBE1NjiMZ/ssFh4wA== + +"@types/pbkdf2@^3.0.0": + version "3.1.0" + resolved "https://registry.yarnpkg.com/@types/pbkdf2/-/pbkdf2-3.1.0.tgz#039a0e9b67da0cdc4ee5dab865caa6b267bb66b1" + integrity sha512-Cf63Rv7jCQ0LaL8tNXmEyqTHuIJxRdlS5vMh1mj5voN4+QFhVZnlZruezqpWYDiJ8UTzhP0VmeLXCmBk66YrMQ== + dependencies: + "@types/node" "*" + +"@types/qs@*", "@types/qs@^6.2.31": + version "6.9.7" + resolved "https://registry.yarnpkg.com/@types/qs/-/qs-6.9.7.tgz#63bb7d067db107cc1e457c303bc25d511febf6cb" + integrity sha512-FGa1F62FT09qcrueBA6qYTrJPVDzah9a+493+o2PCXsesWHIn27G98TsSMs3WPNbZIEj4+VJf6saSFpvD+3Zsw== + +"@types/range-parser@*": + version "1.2.4" + resolved "https://registry.yarnpkg.com/@types/range-parser/-/range-parser-1.2.4.tgz#cd667bcfdd025213aafb7ca5915a932590acdcdc" + integrity sha512-EEhsLsD6UsDM1yFhAvy0Cjr6VwmpMWqFBCb9w07wVugF7w9nfajxLuVmngTIpgS6svCnm6Vaw+MZhoDCKnOfsw== + +"@types/secp256k1@^4.0.1": + version "4.0.3" + resolved "https://registry.yarnpkg.com/@types/secp256k1/-/secp256k1-4.0.3.tgz#1b8e55d8e00f08ee7220b4d59a6abe89c37a901c" + integrity sha512-Da66lEIFeIz9ltsdMZcpQvmrmmoqrfju8pm1BH8WbYjZSwUgCwXLb9C+9XYogwBITnbsSaMdVPb2ekf7TV+03w== + dependencies: + "@types/node" "*" + +"@types/serve-static@*": + version "1.13.10" + resolved "https://registry.yarnpkg.com/@types/serve-static/-/serve-static-1.13.10.tgz#f5e0ce8797d2d7cc5ebeda48a52c96c4fa47a8d9" + integrity sha512-nCkHGI4w7ZgAdNkrEu0bv+4xNV/XDqW+DydknebMOQwkpDGx8G+HTlj7R7ABI8i8nKxVw0wtKPi1D+lPOkh4YQ== + dependencies: + "@types/mime" "^1" + "@types/node" "*" + +"@types/ungap__global-this@^0.3.1": + version "0.3.1" + resolved "https://registry.yarnpkg.com/@types/ungap__global-this/-/ungap__global-this-0.3.1.tgz#18ce9f657da556037a29d50604335614ce703f4c" + integrity sha512-+/DsiV4CxXl6ZWefwHZDXSe1Slitz21tom38qPCaG0DYCS1NnDPIQDTKcmQ/tvK/edJUKkmuIDBJbmKDiB0r/g== + +"@types/websocket@1.0.1": + version "1.0.1" + resolved "https://registry.yarnpkg.com/@types/websocket/-/websocket-1.0.1.tgz#039272c196c2c0e4868a0d8a1a27bbb86e9e9138" + integrity 
sha512-f5WLMpezwVxCLm1xQe/kdPpQIOmL0TXYx2O15VYfYzc7hTIdxiOoOvez+McSIw3b7z/1zGovew9YSL7+h4h7/Q== + dependencies: + "@types/node" "*" + +"@types/ws@^7.0.0", "@types/ws@^7.4.4": + version "7.4.7" + resolved "https://registry.yarnpkg.com/@types/ws/-/ws-7.4.7.tgz#f7c390a36f7a0679aa69de2d501319f4f8d9b702" + integrity sha512-JQbbmxZTZehdc2iszGKs5oC3NFnjeay7mtAWrdt7qNtAVK0g19muApzAy4bm9byz79xa2ZnO/BOBC2R8RC5Lww== + dependencies: + "@types/node" "*" + +"@types/zen-observable@^0.8.0": + version "0.8.2" + resolved "https://registry.yarnpkg.com/@types/zen-observable/-/zen-observable-0.8.2.tgz#808c9fa7e4517274ed555fa158f2de4b4f468e71" + integrity sha512-HrCIVMLjE1MOozVoD86622S7aunluLb2PJdPfb3nYiEtohm8mIB/vyv0Fd37AdeMFrTUQXEunw78YloMA3Qilg== + +"@ungap/global-this@^0.4.2": + version "0.4.4" + resolved "https://registry.yarnpkg.com/@ungap/global-this/-/global-this-0.4.4.tgz#8a1b2cfcd3e26e079a847daba879308c924dd695" + integrity sha512-mHkm6FvepJECMNthFuIgpAEFmPOk71UyXuIxYfjytvFTnSDBIz7jmViO+LfHI/AjrazWije0PnSP3+/NlwzqtA== + +"@wry/context@^0.5.2": + version "0.5.4" + resolved "https://registry.yarnpkg.com/@wry/context/-/context-0.5.4.tgz#b6c28038872e0a0e1ff14eb40b5bf4cab2ab4e06" + integrity sha512-/pktJKHUXDr4D6TJqWgudOPJW2Z+Nb+bqk40jufA3uTkLbnCRKdJPiYDIa/c7mfcPH8Hr6O8zjCERpg5Sq04Zg== + dependencies: + tslib "^1.14.1" + +"@wry/equality@^0.1.2": + version "0.1.11" + resolved "https://registry.yarnpkg.com/@wry/equality/-/equality-0.1.11.tgz#35cb156e4a96695aa81a9ecc4d03787bc17f1790" + integrity sha512-mwEVBDUVODlsQQ5dfuLUS5/Tf7jqUKyhKYHmVi4fPB6bDMOfWvUPJmKgS1Z7Za/sOI3vzWt4+O7yCiL/70MogA== + dependencies: + tslib "^1.9.3" + +"@wry/equality@^0.3.0": + version "0.3.4" + resolved "https://registry.yarnpkg.com/@wry/equality/-/equality-0.3.4.tgz#37f101552b18a046d5c0c06da7b2021b15f72c03" + integrity sha512-1gQQhCPenzxw/1HzLlvSIs/59eBHJf9ZDIussjjZhqNSqQuPKQIzN6SWt4kemvlBPDi7RqMuUa03pId7MAE93g== + dependencies: + tslib "^1.14.1" + +"@wry/trie@^0.2.1": + version "0.2.2" + resolved "https://registry.yarnpkg.com/@wry/trie/-/trie-0.2.2.tgz#99f20f0fcbbcda17006069b155c826cbabfc402f" + integrity sha512-OxqBB39x6MfHaa2HpMiRMfhuUnQTddD32Ko020eBeJXq87ivX6xnSSnzKHVbA21p7iqBASz8n/07b6W5wW1BVQ== + dependencies: + tslib "^1.14.1" + +JSONStream@1.3.2: + version "1.3.2" + resolved "https://registry.yarnpkg.com/JSONStream/-/JSONStream-1.3.2.tgz#c102371b6ec3a7cf3b847ca00c20bb0fce4c6dea" + integrity sha512-mn0KSip7N4e0UDPZHnqDsHECo5uGQrixQKnAskOM1BIB8hd7QKbd6il8IPRPudPHOeHiECoCFqhyMaRO9+nWyA== + dependencies: + jsonparse "^1.2.0" + through ">=2.2.7 <3" + +JSONStream@^1.3.5: + version "1.3.5" + resolved "https://registry.yarnpkg.com/JSONStream/-/JSONStream-1.3.5.tgz#3208c1f08d3a4d99261ab64f92302bc15e111ca0" + integrity sha512-E+iruNOY8VV9s4JEbe1aNEm6MiszPRr/UfcHMz0TQh1BXSxHK+ASV1R6W4HpjBhSeS+54PIsAMCBmwD06LLsqQ== + dependencies: + jsonparse "^1.2.0" + through ">=2.2.7 <3" + +abab@^1.0.0: + version "1.0.4" + resolved "https://registry.yarnpkg.com/abab/-/abab-1.0.4.tgz#5faad9c2c07f60dd76770f71cf025b62a63cfd4e" + integrity sha1-X6rZwsB/YN12dw9xzwJbYqY8/U4= + +abbrev@1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/abbrev/-/abbrev-1.1.1.tgz#f8f2c887ad10bf67f634f005b6987fed3179aac8" + integrity sha512-nne9/IiQ/hzIhY6pdDnbBtz7DjPTKrY00P/zvPSm5pOFkl6xuGrGnXn/VtTNNfNtAfZ9/1RtehkszU9qcTii0Q== + +abort-controller@3.0.0, abort-controller@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/abort-controller/-/abort-controller-3.0.0.tgz#eaf54d53b62bae4138e809ca225c8439a6efb392" + integrity 
sha512-h8lQ8tacZYnR3vNQTgibj+tODHI5/+l06Au2Pcriv/Gmet0eaj4TwWH41sO9wnHDiQsEj19q0drzdWdeAHtweg== + dependencies: + event-target-shim "^5.0.0" + +abstract-leveldown@^6.2.1: + version "6.3.0" + resolved "https://registry.yarnpkg.com/abstract-leveldown/-/abstract-leveldown-6.3.0.tgz#d25221d1e6612f820c35963ba4bd739928f6026a" + integrity sha512-TU5nlYgta8YrBMNpc9FwQzRbiXsj49gsALsXadbGHt9CROPzX5fB0rWDR5mtdpOOKa5XqRFpbj1QroPAoPzVjQ== + dependencies: + buffer "^5.5.0" + immediate "^3.2.3" + level-concat-iterator "~2.0.0" + level-supports "~1.0.0" + xtend "~4.0.0" + +abstract-leveldown@~2.6.0: + version "2.6.3" + resolved "https://registry.yarnpkg.com/abstract-leveldown/-/abstract-leveldown-2.6.3.tgz#1c5e8c6a5ef965ae8c35dfb3a8770c476b82c4b8" + integrity sha512-2++wDf/DYqkPR3o5tbfdhF96EfMApo1GpPfzOsR/ZYXdkSmELlvOOEAl9iKkRsktMPHdGjO4rtkBpf2I7TiTeA== + dependencies: + xtend "~4.0.0" + +abstract-leveldown@~2.7.1: + version "2.7.2" + resolved "https://registry.yarnpkg.com/abstract-leveldown/-/abstract-leveldown-2.7.2.tgz#87a44d7ebebc341d59665204834c8b7e0932cc93" + integrity sha512-+OVvxH2rHVEhWLdbudP6p0+dNMXu8JA1CbhP19T8paTYAcX7oJ4OVjT+ZUVpv7mITxXHqDMej+GdqXBmXkw09w== + dependencies: + xtend "~4.0.0" + +abstract-leveldown@~6.0.0, abstract-leveldown@~6.0.1: + version "6.0.3" + resolved "https://registry.yarnpkg.com/abstract-leveldown/-/abstract-leveldown-6.0.3.tgz#b4b6159343c74b0c5197b2817854782d8f748c4a" + integrity sha512-jzewKKpZbaYUa6HTThnrl+GrJhzjEAeuc7hTVpZdzg7kupXZFoqQDFwyOwLNbmJKJlmzw8yiipMPkDiuKkT06Q== + dependencies: + level-concat-iterator "~2.0.0" + xtend "~4.0.0" + +abstract-leveldown@~6.2.1: + version "6.2.3" + resolved "https://registry.yarnpkg.com/abstract-leveldown/-/abstract-leveldown-6.2.3.tgz#036543d87e3710f2528e47040bc3261b77a9a8eb" + integrity sha512-BsLm5vFMRUrrLeCcRc+G0t2qOaTzpoJQLOubq2XM72eNpjF5UdU5o/5NvlNhx95XHcAvcl8OMXr4mlg/fRgUXQ== + dependencies: + buffer "^5.5.0" + immediate "^3.2.3" + level-concat-iterator "~2.0.0" + level-supports "~1.0.0" + xtend "~4.0.0" + +accepts@^1.3.5, accepts@~1.3.8: + version "1.3.8" + resolved "https://registry.yarnpkg.com/accepts/-/accepts-1.3.8.tgz#0bf0be125b67014adcb0b0921e62db7bffe16b2e" + integrity sha512-PYAthTa2m2VKxuvSD3DPC/Gy+U+sOA1LAuT8mkmRuvw+NACSaeXEQ+NHcVF7rONl6qcaxV3Uuemwawk+7+SJLw== + dependencies: + mime-types "~2.1.34" + negotiator "0.6.3" + +acorn-globals@^1.0.4: + version "1.0.9" + resolved "https://registry.yarnpkg.com/acorn-globals/-/acorn-globals-1.0.9.tgz#55bb5e98691507b74579d0513413217c380c54cf" + integrity sha1-VbtemGkVB7dFedBRNBMhfDgMVM8= + dependencies: + acorn "^2.1.0" + +acorn@4.X: + version "4.0.13" + resolved "https://registry.yarnpkg.com/acorn/-/acorn-4.0.13.tgz#105495ae5361d697bd195c825192e1ad7f253787" + integrity sha1-EFSVrlNh1pe9GVyCUZLhrX8lN4c= + +acorn@^2.1.0, acorn@^2.4.0: + version "2.7.0" + resolved "https://registry.yarnpkg.com/acorn/-/acorn-2.7.0.tgz#ab6e7d9d886aaca8b085bc3312b79a198433f0e7" + integrity sha1-q259nYhqrKiwhbwzEreaGYQz8Oc= + +aes-js@3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/aes-js/-/aes-js-3.0.0.tgz#e21df10ad6c2053295bcbb8dab40b09dbea87e4d" + integrity sha1-4h3xCtbCBTKVvLuNq0Cwnb6ofk0= + +aes-js@^3.1.1: + version "3.1.2" + resolved "https://registry.yarnpkg.com/aes-js/-/aes-js-3.1.2.tgz#db9aabde85d5caabbfc0d4f2a4446960f627146a" + integrity sha512-e5pEa2kBnBOgR4Y/p20pskXI74UEz7de8ZGVo58asOtvSVG5YAbJeELPZxOmt+Bnz3rX753YKhfIn4X4l1PPRQ== + +ajv@^6.10.0, ajv@^6.12.3: + version "6.12.6" + resolved 
"https://registry.yarnpkg.com/ajv/-/ajv-6.12.6.tgz#baf5a62e802b07d977034586f8c3baf5adf26df4" + integrity sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g== + dependencies: + fast-deep-equal "^3.1.1" + fast-json-stable-stringify "^2.0.0" + json-schema-traverse "^0.4.1" + uri-js "^4.2.2" + +ansi-colors@4.1.1: + version "4.1.1" + resolved "https://registry.yarnpkg.com/ansi-colors/-/ansi-colors-4.1.1.tgz#cbb9ae256bf750af1eab344f229aa27fe94ba348" + integrity sha512-JoX0apGbHaUJBNl6yF+p6JAFYZ666/hhCGKN5t9QFjbJQKUU/g8MNbFDbvfrgKXvI1QpZplPOnwIo99lX/AAmA== + +ansi-colors@^3.2.1: + version "3.2.4" + resolved "https://registry.yarnpkg.com/ansi-colors/-/ansi-colors-3.2.4.tgz#e3a3da4bfbae6c86a9c285625de124a234026fbf" + integrity sha512-hHUXGagefjN2iRrID63xckIvotOXOojhQKWIPUZ4mNUZ9nLZW+7FMNoE1lOkEhNWYsx/7ysGIuJYCiMAA9FnrA== + +ansi-mark@^1.0.0: + version "1.0.4" + resolved "https://registry.yarnpkg.com/ansi-mark/-/ansi-mark-1.0.4.tgz#1cd4ba8d57f15f109d6aaf6ec9ca9786c8a4ee6c" + integrity sha1-HNS6jVfxXxCdaq9uycqXhsik7mw= + dependencies: + ansi-regex "^3.0.0" + array-uniq "^1.0.3" + chalk "^2.3.2" + strip-ansi "^4.0.0" + super-split "^1.1.0" + +ansi-regex@^2.0.0: + version "2.1.1" + resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-2.1.1.tgz#c3b33ab5ee360d86e0e628f0468ae7ef27d654df" + integrity sha1-w7M6te42DYbg5ijwRorn7yfWVN8= + +ansi-regex@^3.0.0: + version "3.0.1" + resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-3.0.1.tgz#123d6479e92ad45ad897d4054e3c7ca7db4944e1" + integrity sha512-+O9Jct8wf++lXxxFc4hc8LsjaSq0HFzzL7cVsw8pRDIPdjKD2mT4ytDZlLuSBZ4cLKZFXIrMGO7DbQCtMJJMKw== + +ansi-regex@^4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-4.1.0.tgz#8b9f8f08cf1acb843756a839ca8c7e3168c51997" + integrity sha512-1apePfXM1UOSqw0o9IiFAovVz9M5S1Dg+4TrDwfMewQ6p/rmMueb7tWZjQ1rx4Loy1ArBggoqGpfqqdI4rondg== + +ansi-regex@^5.0.1: + version "5.0.1" + resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-5.0.1.tgz#082cb2c89c9fe8659a311a53bd6a4dc5301db304" + integrity sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ== + +ansi-styles@^2.2.1: + version "2.2.1" + resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-2.2.1.tgz#b432dd3358b634cf75e1e4664368240533c1ddbe" + integrity sha1-tDLdM1i2NM914eRmQ2gkBTPB3b4= + +ansi-styles@^3.2.0, ansi-styles@^3.2.1: + version "3.2.1" + resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-3.2.1.tgz#41fbb20243e50b12be0f04b8dedbf07520ce841d" + integrity sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA== + dependencies: + color-convert "^1.9.0" + +ansi-styles@^4.0.0, ansi-styles@^4.1.0: + version "4.3.0" + resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-4.3.0.tgz#edd803628ae71c04c85ae7a0906edad34b648937" + integrity sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg== + dependencies: + color-convert "^2.0.1" + +any-promise@^1.3.0: + version "1.3.0" + resolved "https://registry.yarnpkg.com/any-promise/-/any-promise-1.3.0.tgz#abc6afeedcea52e809cdc0376aed3ce39635d17f" + integrity sha1-q8av7tzqUugJzcA3au0845Y10X8= + +anymatch@~3.1.1: + version "3.1.2" + resolved "https://registry.yarnpkg.com/anymatch/-/anymatch-3.1.2.tgz#c0557c096af32f106198f4f4e2a383537e378716" + integrity sha512-P43ePfOAIupkguHUycrc4qJ9kz8ZiuOUijaETwX7THt0Y/GNK7v0aa8rY816xWjZ7rJdA5XdMcpVFTKMq+RvWg== + dependencies: + normalize-path 
"^3.0.0" + picomatch "^2.0.4" + +apisauce@^1.0.1: + version "1.1.5" + resolved "https://registry.yarnpkg.com/apisauce/-/apisauce-1.1.5.tgz#31d41a5cf805e401266cec67faf1a50f4aeae234" + integrity sha512-gKC8qb/bDJsPsnEXLZnXJ7gVx7dh87CEVNeIwv1dvaffnXoh5GHwac5pWR1P2broLiVj/fqFMQvLDDt/RhjiqA== + dependencies: + axios "^0.21.2" + ramda "^0.25.0" + +apisauce@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/apisauce/-/apisauce-2.0.1.tgz#cf5af56ea6ff5145e6eeb8d4ba471c7e0662b8c4" + integrity sha512-mJBw3pKmtfVoP6oifnf7/iRJQtNkVb6GkYsVOXN2pidootj1mhGBtzYHOX9FVBzAz5QV2GMu8IJtiNIgZ44kHQ== + dependencies: + axios "^0.21.1" + ramda "^0.25.0" + +apollo-cache-control@^0.14.0: + version "0.14.0" + resolved "https://registry.yarnpkg.com/apollo-cache-control/-/apollo-cache-control-0.14.0.tgz#95f20c3e03e7994e0d1bd48c59aeaeb575ed0ce7" + integrity sha512-qN4BCq90egQrgNnTRMUHikLZZAprf3gbm8rC5Vwmc6ZdLolQ7bFsa769Hqi6Tq/lS31KLsXBLTOsRbfPHph12w== + dependencies: + apollo-server-env "^3.1.0" + apollo-server-plugin-base "^0.13.0" + +apollo-datasource@^0.9.0: + version "0.9.0" + resolved "https://registry.yarnpkg.com/apollo-datasource/-/apollo-datasource-0.9.0.tgz#b0b2913257a6103a5f4c03cb56d78a30e9d850db" + integrity sha512-y8H99NExU1Sk4TvcaUxTdzfq2SZo6uSj5dyh75XSQvbpH6gdAXIW9MaBcvlNC7n0cVPsidHmOcHOWxJ/pTXGjA== + dependencies: + apollo-server-caching "^0.7.0" + apollo-server-env "^3.1.0" + +apollo-fetch@^0.7.0: + version "0.7.0" + resolved "https://registry.yarnpkg.com/apollo-fetch/-/apollo-fetch-0.7.0.tgz#63c255a0ccb1b4c473524d8f9b536d72438bd3e7" + integrity sha512-0oHsDW3Zxx+Of1wuqcOXruNj4Kv55WN69tkIjwkCQDEIrgCpgA2scjChFsgflSVMy/1mkTKCY1Mc0TYJhNRzmw== + dependencies: + cross-fetch "^1.0.0" + +apollo-graphql@^0.9.0: + version "0.9.5" + resolved "https://registry.yarnpkg.com/apollo-graphql/-/apollo-graphql-0.9.5.tgz#9113483ca7f7fa49ee9e9a299c45d30b1cf3bf61" + integrity sha512-RGt5k2JeBqrmnwRM0VOgWFiGKlGJMfmiif/4JvdaEqhMJ+xqe/9cfDYzXfn33ke2eWixsAbjEbRfy8XbaN9nTw== + dependencies: + core-js-pure "^3.10.2" + lodash.sortby "^4.7.0" + sha.js "^2.4.11" + +apollo-link@1.2.14, apollo-link@^1.2.14: + version "1.2.14" + resolved "https://registry.yarnpkg.com/apollo-link/-/apollo-link-1.2.14.tgz#3feda4b47f9ebba7f4160bef8b977ba725b684d9" + integrity sha512-p67CMEFP7kOG1JZ0ZkYZwRDa369w5PIjtMjvrQd/HnIV8FRsHRqLqK+oAZQnFa1DDdZtOtHTi+aMIW6EatC2jg== + dependencies: + apollo-utilities "^1.3.0" + ts-invariant "^0.4.0" + tslib "^1.9.3" + zen-observable-ts "^0.8.21" + +apollo-reporting-protobuf@^0.8.0: + version "0.8.0" + resolved "https://registry.yarnpkg.com/apollo-reporting-protobuf/-/apollo-reporting-protobuf-0.8.0.tgz#ae9d967934d3d8ed816fc85a0d8068ef45c371b9" + integrity sha512-B3XmnkH6Y458iV6OsA7AhfwvTgeZnFq9nPVjbxmLKnvfkEl8hYADtz724uPa0WeBiD7DSFcnLtqg9yGmCkBohg== + dependencies: + "@apollo/protobufjs" "1.2.2" + +apollo-server-caching@^0.7.0: + version "0.7.0" + resolved "https://registry.yarnpkg.com/apollo-server-caching/-/apollo-server-caching-0.7.0.tgz#e6d1e68e3bb571cba63a61f60b434fb771c6ff39" + integrity sha512-MsVCuf/2FxuTFVhGLK13B+TZH9tBd2qkyoXKKILIiGcZ5CDUEBO14vIV63aNkMkS1xxvK2U4wBcuuNj/VH2Mkw== + dependencies: + lru-cache "^6.0.0" + +apollo-server-core@^2.25.3: + version "2.25.3" + resolved "https://registry.yarnpkg.com/apollo-server-core/-/apollo-server-core-2.25.3.tgz#1a649fd14b3928f5b6e65f0002b380fcfde56862" + integrity sha512-Midow3uZoJ9TjFNeCNSiWElTVZlvmB7G7tG6PPoxIR9Px90/v16Q6EzunDIO0rTJHRC3+yCwZkwtf8w2AcP0sA== + dependencies: + "@apollographql/apollo-tools" "^0.5.0" + 
"@apollographql/graphql-playground-html" "1.6.27" + "@apollographql/graphql-upload-8-fork" "^8.1.3" + "@josephg/resolvable" "^1.0.0" + "@types/ws" "^7.0.0" + apollo-cache-control "^0.14.0" + apollo-datasource "^0.9.0" + apollo-graphql "^0.9.0" + apollo-reporting-protobuf "^0.8.0" + apollo-server-caching "^0.7.0" + apollo-server-env "^3.1.0" + apollo-server-errors "^2.5.0" + apollo-server-plugin-base "^0.13.0" + apollo-server-types "^0.9.0" + apollo-tracing "^0.15.0" + async-retry "^1.2.1" + fast-json-stable-stringify "^2.0.0" + graphql-extensions "^0.15.0" + graphql-tag "^2.11.0" + graphql-tools "^4.0.8" + loglevel "^1.6.7" + lru-cache "^6.0.0" + sha.js "^2.4.11" + subscriptions-transport-ws "^0.9.19" + uuid "^8.0.0" + +apollo-server-env@^3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/apollo-server-env/-/apollo-server-env-3.1.0.tgz#0733c2ef50aea596cc90cf40a53f6ea2ad402cd0" + integrity sha512-iGdZgEOAuVop3vb0F2J3+kaBVi4caMoxefHosxmgzAbbSpvWehB8Y1QiSyyMeouYC38XNVk5wnZl+jdGSsWsIQ== + dependencies: + node-fetch "^2.6.1" + util.promisify "^1.0.0" + +apollo-server-errors@^2.5.0: + version "2.5.0" + resolved "https://registry.yarnpkg.com/apollo-server-errors/-/apollo-server-errors-2.5.0.tgz#5d1024117c7496a2979e3e34908b5685fe112b68" + integrity sha512-lO5oTjgiC3vlVg2RKr3RiXIIQ5pGXBFxYGGUkKDhTud3jMIhs+gel8L8zsEjKaKxkjHhCQAA/bcEfYiKkGQIvA== + +apollo-server-express@^2.25.3: + version "2.25.3" + resolved "https://registry.yarnpkg.com/apollo-server-express/-/apollo-server-express-2.25.3.tgz#33fe0dae27fa71c8710e714efd93451bf2eb105f" + integrity sha512-tTFYn0oKH2qqLwVj7Ez2+MiKleXACODiGh5IxsB7VuYCPMAi9Yl8iUSlwTjQUvgCWfReZjnf0vFL2k5YhDlrtQ== + dependencies: + "@apollographql/graphql-playground-html" "1.6.27" + "@types/accepts" "^1.3.5" + "@types/body-parser" "1.19.0" + "@types/cors" "2.8.10" + "@types/express" "^4.17.12" + "@types/express-serve-static-core" "^4.17.21" + accepts "^1.3.5" + apollo-server-core "^2.25.3" + apollo-server-types "^0.9.0" + body-parser "^1.18.3" + cors "^2.8.5" + express "^4.17.1" + graphql-subscriptions "^1.0.0" + graphql-tools "^4.0.8" + parseurl "^1.3.2" + subscriptions-transport-ws "^0.9.19" + type-is "^1.6.16" + +apollo-server-plugin-base@^0.13.0: + version "0.13.0" + resolved "https://registry.yarnpkg.com/apollo-server-plugin-base/-/apollo-server-plugin-base-0.13.0.tgz#3f85751a420d3c4625355b6cb3fbdd2acbe71f13" + integrity sha512-L3TMmq2YE6BU6I4Tmgygmd0W55L+6XfD9137k+cWEBFu50vRY4Re+d+fL5WuPkk5xSPKd/PIaqzidu5V/zz8Kg== + dependencies: + apollo-server-types "^0.9.0" + +apollo-server-types@^0.9.0: + version "0.9.0" + resolved "https://registry.yarnpkg.com/apollo-server-types/-/apollo-server-types-0.9.0.tgz#ccf550b33b07c48c72f104fbe2876232b404848b" + integrity sha512-qk9tg4Imwpk732JJHBkhW0jzfG0nFsLqK2DY6UhvJf7jLnRePYsPxWfPiNkxni27pLE2tiNlCwoDFSeWqpZyBg== + dependencies: + apollo-reporting-protobuf "^0.8.0" + apollo-server-caching "^0.7.0" + apollo-server-env "^3.1.0" + +apollo-server@^2.18.2: + version "2.25.3" + resolved "https://registry.yarnpkg.com/apollo-server/-/apollo-server-2.25.3.tgz#2e5db9ce5217389625ac5014551dcbdeeedcd1d8" + integrity sha512-+eUY2//DLkU7RkJLn6CTl1P89/ZMHuUQnWqv8La2iJ2hLT7Me+nMx+hgHl3LqlT/qDstQ8qA45T85FuCayplmQ== + dependencies: + apollo-server-core "^2.25.3" + apollo-server-express "^2.25.3" + express "^4.0.0" + graphql-subscriptions "^1.0.0" + graphql-tools "^4.0.8" + stoppable "^1.1.0" + +apollo-tracing@^0.15.0: + version "0.15.0" + resolved 
"https://registry.yarnpkg.com/apollo-tracing/-/apollo-tracing-0.15.0.tgz#237fbbbf669aee4370b7e9081b685eabaa8ce84a" + integrity sha512-UP0fztFvaZPHDhIB/J+qGuy6hWO4If069MGC98qVs0I8FICIGu4/8ykpX3X3K6RtaQ56EDAWKykCxFv4ScxMeA== + dependencies: + apollo-server-env "^3.1.0" + apollo-server-plugin-base "^0.13.0" + +apollo-upload-client@14.1.2: + version "14.1.2" + resolved "https://registry.yarnpkg.com/apollo-upload-client/-/apollo-upload-client-14.1.2.tgz#7a72b000f1cd67eaf8f12b4bda2796d0898c0dae" + integrity sha512-ozaW+4tnVz1rpfwiQwG3RCdCcZ93RV/37ZQbRnObcQ9mjb+zur58sGDPVg9Ef3fiujLmiE/Fe9kdgvIMA3VOjA== + dependencies: + "@apollo/client" "^3.1.5" + "@babel/runtime" "^7.11.2" + extract-files "^9.0.0" + +apollo-utilities@^1.0.1, apollo-utilities@^1.3.0: + version "1.3.4" + resolved "https://registry.yarnpkg.com/apollo-utilities/-/apollo-utilities-1.3.4.tgz#6129e438e8be201b6c55b0f13ce49d2c7175c9cf" + integrity sha512-pk2hiWrCXMAy2fRPwEyhvka+mqwzeP60Jr1tRYi5xru+3ko94HI9o6lK0CT33/w4RDlxWchmdhDCrvdr+pHCig== + dependencies: + "@wry/equality" "^0.1.2" + fast-json-stable-stringify "^2.0.0" + ts-invariant "^0.4.0" + tslib "^1.10.0" + +app-module-path@^2.2.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/app-module-path/-/app-module-path-2.2.0.tgz#641aa55dfb7d6a6f0a8141c4b9c0aa50b6c24dd5" + integrity sha512-gkco+qxENJV+8vFcDiiFhuoSvRXb2a/QPqpSoWhVz829VNJfOTnELbBmPmNKFxf3xdNnw4DWCkzkDaavcX/1YQ== + +aproba@^1.0.3: + version "1.2.0" + resolved "https://registry.yarnpkg.com/aproba/-/aproba-1.2.0.tgz#6802e6264efd18c790a1b0d517f0f2627bf2c94a" + integrity sha512-Y9J6ZjXtoYh8RnXVCMOU/ttDmk1aBjunq9vO0ta5x85WDQiQfUF9sIPBITdbiiIVcBo03Hi3jMxigBtsddlXRw== + +are-we-there-yet@~1.1.2: + version "1.1.5" + resolved "https://registry.yarnpkg.com/are-we-there-yet/-/are-we-there-yet-1.1.5.tgz#4b35c2944f062a8bfcda66410760350fe9ddfc21" + integrity sha512-5hYdAkZlcG8tOLujVDTgCT+uPX0VnpAH28gWsLfzpXYm7wP6mp5Q/gYyR7YQ0cKVJcXJnl3j2kpBan13PtQf6w== + dependencies: + delegates "^1.0.0" + readable-stream "^2.0.6" + +argparse@^1.0.7: + version "1.0.10" + resolved "https://registry.yarnpkg.com/argparse/-/argparse-1.0.10.tgz#bcd6791ea5ae09725e17e5ad988134cd40b3d911" + integrity sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg== + dependencies: + sprintf-js "~1.0.2" + +argsarray@0.0.1, argsarray@^0.0.1: + version "0.0.1" + resolved "https://registry.yarnpkg.com/argsarray/-/argsarray-0.0.1.tgz#6e7207b4ecdb39b0af88303fa5ae22bda8df61cb" + integrity sha1-bnIHtOzbObCviDA/pa4ivajfYcs= + +arr-diff@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/arr-diff/-/arr-diff-2.0.0.tgz#8f3b827f955a8bd669697e4a4256ac3ceae356cf" + integrity sha1-jzuCf5Vai9ZpaX5KQlasPOrjVs8= + dependencies: + arr-flatten "^1.0.1" + +arr-flatten@^1.0.1: + version "1.1.0" + resolved "https://registry.yarnpkg.com/arr-flatten/-/arr-flatten-1.1.0.tgz#36048bbff4e7b47e136644316c99669ea5ae91f1" + integrity sha512-L3hKV5R/p5o81R7O02IGnwpDmkp6E982XhtbuwSe3O4qOtMMMtodicASA1Cny2U+aCXcNpml+m4dPsvsJ3jatg== + +array-filter@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/array-filter/-/array-filter-1.0.0.tgz#baf79e62e6ef4c2a4c0b831232daffec251f9d83" + integrity sha1-uveeYubvTCpMC4MSMtr/7CUfnYM= + +array-flatten@1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/array-flatten/-/array-flatten-1.1.1.tgz#9a5f699051b1e7073328f2a008968b64ea2955d2" + integrity sha512-PCVAQswWemu6UdxsDFFX/+gVeYqKAod3D3UVm91jHwynguOwAvYPhx8nNlM++NqRcK6CxxpUafjmhIdKiHibqg== + +array-union@^2.1.0: + version "2.1.0" 
+ resolved "https://registry.yarnpkg.com/array-union/-/array-union-2.1.0.tgz#b798420adbeb1de828d84acd8a2e23d3efe85e8d" + integrity sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw== + +array-uniq@^1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/array-uniq/-/array-uniq-1.0.3.tgz#af6ac877a25cc7f74e058894753858dfdb24fdb6" + integrity sha1-r2rId6Jcx/dOBYiUdThY39sk/bY= + +array-unique@^0.2.1: + version "0.2.1" + resolved "https://registry.yarnpkg.com/array-unique/-/array-unique-0.2.1.tgz#a1d97ccafcbc2625cc70fadceb36a50c58b01a53" + integrity sha1-odl8yvy8JiXMcPrc6zalDFiwGlM= + +array.prototype.map@^1.0.1: + version "1.0.3" + resolved "https://registry.yarnpkg.com/array.prototype.map/-/array.prototype.map-1.0.3.tgz#1609623618d3d84134a37d4a220030c2bd18420b" + integrity sha512-nNcb30v0wfDyIe26Yif3PcV1JXQp4zEeEfupG7L4SRjnD6HLbO5b2a7eVSba53bOx4YCHYMBHt+Fp4vYstneRA== + dependencies: + call-bind "^1.0.0" + define-properties "^1.1.3" + es-abstract "^1.18.0-next.1" + es-array-method-boxes-properly "^1.0.0" + is-string "^1.0.5" + +asap@~2.0.3, asap@~2.0.6: + version "2.0.6" + resolved "https://registry.yarnpkg.com/asap/-/asap-2.0.6.tgz#e50347611d7e690943208bbdafebcbc2fb866d46" + integrity sha1-5QNHYR1+aQlDIIu9r+vLwvuGbUY= + +asmcrypto.js@^2.3.2: + version "2.3.2" + resolved "https://registry.yarnpkg.com/asmcrypto.js/-/asmcrypto.js-2.3.2.tgz#b9f84bd0a1fb82f21f8c29cc284a707ad17bba2e" + integrity sha512-3FgFARf7RupsZETQ1nHnhLUUvpcttcCq1iZCaVAbJZbCZ5VNRrNyvpDyHTOb0KC3llFcsyOT/a99NZcCbeiEsA== + +asn1.js@^5.0.1, asn1.js@^5.2.0: + version "5.4.1" + resolved "https://registry.yarnpkg.com/asn1.js/-/asn1.js-5.4.1.tgz#11a980b84ebb91781ce35b0fdc2ee294e3783f07" + integrity sha512-+I//4cYPccV8LdmBLiX8CYvf9Sp3vQsrqu2QNXRcrbiWvcx/UdlFiqUJJzxRQxgsZmvhXhn4cSKeSmoFjVdupA== + dependencies: + bn.js "^4.0.0" + inherits "^2.0.1" + minimalistic-assert "^1.0.0" + safer-buffer "^2.1.0" + +asn1@~0.2.3: + version "0.2.6" + resolved "https://registry.yarnpkg.com/asn1/-/asn1-0.2.6.tgz#0d3a7bb6e64e02a90c0303b31f292868ea09a08d" + integrity sha512-ix/FxPn0MDjeyJ7i/yoHGFt/EX6LyNbxSEhPPXODPL+KB0VPk86UYfL0lMdy+KCnv+fmvIzySwaK5COwqVbWTQ== + dependencies: + safer-buffer "~2.1.0" + +assemblyscript@0.19.10: + version "0.19.10" + resolved "https://registry.yarnpkg.com/assemblyscript/-/assemblyscript-0.19.10.tgz#7ede6d99c797a219beb4fa4614c3eab9e6343c8e" + integrity sha512-HavcUBXB3mBTRGJcpvaQjmnmaqKHBGREjSPNsIvnAk2f9dj78y4BkMaSSdvBQYWcDDzsHQjyUC8stICFkD1Odg== + dependencies: + binaryen "101.0.0-nightly.20210723" + long "^4.0.0" + +assert-plus@1.0.0, assert-plus@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/assert-plus/-/assert-plus-1.0.0.tgz#f12e0f3c5d77b0b1cdd9146942e4e96c1e4dd525" + integrity sha512-NfJ4UzBCcQGLDlQq7nHxH+tv3kyZ0hHQqF5BO6J7tNJeP5do1llPr8dZ8zHonfhAu0PHAdMkSo+8o0wxg9lZWw== + +async-eventemitter@^0.2.2: + version "0.2.4" + resolved "https://registry.yarnpkg.com/async-eventemitter/-/async-eventemitter-0.2.4.tgz#f5e7c8ca7d3e46aab9ec40a292baf686a0bafaca" + integrity sha512-pd20BwL7Yt1zwDFy+8MX8F1+WCT8aQeKj0kQnTrH9WaeRETlRamVhD0JtRPmrV4GfOJ2F9CvdQkZeZhnh2TuHw== + dependencies: + async "^2.4.0" + +async-limiter@^1.0.0, async-limiter@~1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/async-limiter/-/async-limiter-1.0.1.tgz#dd379e94f0db8310b08291f9d64c3209766617fd" + integrity sha512-csOlWGAcRFJaI6m+F2WKdnMKr4HhdhFVBk0H/QbJFMCr+uO2kwohwXQPxw/9OCxp05r5ghVBFSyioixx3gfkNQ== + +async-retry@^1.2.1: + version "1.3.3" + resolved 
"https://registry.yarnpkg.com/async-retry/-/async-retry-1.3.3.tgz#0e7f36c04d8478e7a58bdbed80cedf977785f280" + integrity sha512-wfr/jstw9xNi/0teMHrRW7dsz3Lt5ARhYNZ2ewpadnhaIp5mbALhOAP+EAdsC7t4Z6wqsDVv9+W6gm1Dk9mEyw== + dependencies: + retry "0.13.1" + +async@^1.4.2: + version "1.5.2" + resolved "https://registry.yarnpkg.com/async/-/async-1.5.2.tgz#ec6a61ae56480c0c3cb241c95618e20892f9672a" + integrity sha1-7GphrlZIDAw8skHJVhjiCJL5Zyo= + +async@^2.0.1, async@^2.1.2, async@^2.4.0, async@^2.5.0: + version "2.6.3" + resolved "https://registry.yarnpkg.com/async/-/async-2.6.3.tgz#d72625e2344a3656e3a3ad4fa749fa83299d82ff" + integrity sha512-zflvls11DCy+dQWzTW2dzuilv8Z5X/pjfmZOWba6TNIVDm+2UDaJmXSOXlasHKfNBs8oo3M0aT50fDEWfKZjXg== + dependencies: + lodash "^4.17.14" + +async@^2.6.1, async@^2.6.2, async@^2.6.3: + version "2.6.4" + resolved "https://registry.yarnpkg.com/async/-/async-2.6.4.tgz#706b7ff6084664cd7eae713f6f965433b5504221" + integrity sha512-mzo5dfJYwAn29PeiJ0zvwTo04zj8HDJj0Mn8TD7sno7q12prdbnasKJHhkm2c1LgrhlJ0teaea8860oxi51mGA== + dependencies: + lodash "^4.17.14" + +asynckit@^0.4.0: + version "0.4.0" + resolved "https://registry.yarnpkg.com/asynckit/-/asynckit-0.4.0.tgz#c79ed97f7f34cb8f2ba1bc9790bcc366474b4b79" + integrity sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q== + +at-least-node@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/at-least-node/-/at-least-node-1.0.0.tgz#602cd4b46e844ad4effc92a8011a3c46e0238dc2" + integrity sha512-+q/t7Ekv1EDY2l6Gda6LLiX14rU9TV20Wa3ofeQmwPFZbOMo9DXrLbOjFaaclkXKWidIaopwAObQDqwWtGUjqg== + +atob@^2.1.2: + version "2.1.2" + resolved "https://registry.yarnpkg.com/atob/-/atob-2.1.2.tgz#6d9517eb9e030d2436666651e86bd9f6f13533c9" + integrity sha512-Wm6ukoaOGJi/73p/cl2GvLjTI5JM1k/O14isD73YML8StrH/7/lRFgmg8nICZgD3bZZvjwCGxtMOD3wWNAu8cg== + +available-typed-arrays@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/available-typed-arrays/-/available-typed-arrays-1.0.2.tgz#6b098ca9d8039079ee3f77f7b783c4480ba513f5" + integrity sha512-XWX3OX8Onv97LMk/ftVyBibpGwY5a8SmuxZPzeOxqmuEqUCOM9ZE+uIaD1VNJ5QnvU2UQusvmKbuM1FR8QWGfQ== + dependencies: + array-filter "^1.0.0" + +await-semaphore@^0.1.3: + version "0.1.3" + resolved "https://registry.yarnpkg.com/await-semaphore/-/await-semaphore-0.1.3.tgz#2b88018cc8c28e06167ae1cdff02504f1f9688d3" + integrity sha512-d1W2aNSYcz/sxYO4pMGX9vq65qOTu0P800epMud+6cYYX0QcT7zyqcxec3VWzpgvdXo57UWmVbZpLMjX2m1I7Q== + +aws-sign2@~0.7.0: + version "0.7.0" + resolved "https://registry.yarnpkg.com/aws-sign2/-/aws-sign2-0.7.0.tgz#b46e890934a9591f2d2f6f86d7e6a9f1b3fe76a8" + integrity sha512-08kcGqnYf/YmjoRhfxyu+CLxBjUtHLXLXX/vUfx9l2LYzG3c1m61nrpyFUZI6zeS+Li/wWMMidD9KgrqtGq3mA== + +aws4@^1.8.0: + version "1.11.0" + resolved "https://registry.yarnpkg.com/aws4/-/aws4-1.11.0.tgz#d61f46d83b2519250e2784daf5b09479a8b41c59" + integrity sha512-xh1Rl34h6Fi1DC2WWKfxUTVqRsNnr6LsKz2+hfwDxQJWmrx8+c7ylaqBMcHfl1U1r2dsifOvKX3LQuLNZ+XSvA== + +axios@^0.21.1, axios@^0.21.2: + version "0.21.4" + resolved "https://registry.yarnpkg.com/axios/-/axios-0.21.4.tgz#c67b90dc0568e5c1cf2b0b858c43ba28e2eda575" + integrity sha512-ut5vewkiu8jjGBdqpM44XxjuCjq9LAKeHVmoVfHVzy8eHgxxq8SbAVQNovDA8mVi05kP0Ea/n/UzcSHcTJQfNg== + dependencies: + follow-redirects "^1.14.0" + +babel-code-frame@^6.26.0: + version "6.26.0" + resolved "https://registry.yarnpkg.com/babel-code-frame/-/babel-code-frame-6.26.0.tgz#63fd43f7dc1e3bb7ce35947db8fe369a3f58c74b" + integrity sha1-Y/1D99weO7fONZR9uP42mj9Yx0s= + dependencies: + chalk 
"^1.1.3" + esutils "^2.0.2" + js-tokens "^3.0.2" + +babel-core@^6.26.0: + version "6.26.3" + resolved "https://registry.yarnpkg.com/babel-core/-/babel-core-6.26.3.tgz#b2e2f09e342d0f0c88e2f02e067794125e75c207" + integrity sha512-6jyFLuDmeidKmUEb3NM+/yawG0M2bDZ9Z1qbZP59cyHLz8kYGKYwpJP0UwUKKUiTRNvxfLesJnTedqczP7cTDA== + dependencies: + babel-code-frame "^6.26.0" + babel-generator "^6.26.0" + babel-helpers "^6.24.1" + babel-messages "^6.23.0" + babel-register "^6.26.0" + babel-runtime "^6.26.0" + babel-template "^6.26.0" + babel-traverse "^6.26.0" + babel-types "^6.26.0" + babylon "^6.18.0" + convert-source-map "^1.5.1" + debug "^2.6.9" + json5 "^0.5.1" + lodash "^4.17.4" + minimatch "^3.0.4" + path-is-absolute "^1.0.1" + private "^0.1.8" + slash "^1.0.0" + source-map "^0.5.7" + +babel-generator@6.26.1, babel-generator@^6.26.0: + version "6.26.1" + resolved "https://registry.yarnpkg.com/babel-generator/-/babel-generator-6.26.1.tgz#1844408d3b8f0d35a404ea7ac180f087a601bd90" + integrity sha512-HyfwY6ApZj7BYTcJURpM5tznulaBvyio7/0d4zFOeMPUmfxkCjHocCuoLa2SAGzBI8AREcH3eP3758F672DppA== + dependencies: + babel-messages "^6.23.0" + babel-runtime "^6.26.0" + babel-types "^6.26.0" + detect-indent "^4.0.0" + jsesc "^1.3.0" + lodash "^4.17.4" + source-map "^0.5.7" + trim-right "^1.0.1" + +babel-helpers@^6.24.1: + version "6.24.1" + resolved "https://registry.yarnpkg.com/babel-helpers/-/babel-helpers-6.24.1.tgz#3471de9caec388e5c850e597e58a26ddf37602b2" + integrity sha1-NHHenK7DiOXIUOWX5Yom3fN2ArI= + dependencies: + babel-runtime "^6.22.0" + babel-template "^6.24.1" + +babel-messages@^6.23.0: + version "6.23.0" + resolved "https://registry.yarnpkg.com/babel-messages/-/babel-messages-6.23.0.tgz#f3cdf4703858035b2a2951c6ec5edf6c62f2630e" + integrity sha1-8830cDhYA1sqKVHG7F7fbGLyYw4= + dependencies: + babel-runtime "^6.22.0" + +babel-plugin-dynamic-import-node@^2.3.3: + version "2.3.3" + resolved "https://registry.yarnpkg.com/babel-plugin-dynamic-import-node/-/babel-plugin-dynamic-import-node-2.3.3.tgz#84fda19c976ec5c6defef57f9427b3def66e17a3" + integrity sha512-jZVI+s9Zg3IqA/kdi0i6UDCybUI3aSBLnglhYbSSjKlV7yF1F/5LWv8MakQmvYpnbJDS6fcBL2KzHSxNCMtWSQ== + dependencies: + object.assign "^4.1.0" + +babel-plugin-polyfill-corejs2@^0.1.4: + version "0.1.10" + resolved "https://registry.yarnpkg.com/babel-plugin-polyfill-corejs2/-/babel-plugin-polyfill-corejs2-0.1.10.tgz#a2c5c245f56c0cac3dbddbf0726a46b24f0f81d1" + integrity sha512-DO95wD4g0A8KRaHKi0D51NdGXzvpqVLnLu5BTvDlpqUEpTmeEtypgC1xqesORaWmiUOQI14UHKlzNd9iZ2G3ZA== + dependencies: + "@babel/compat-data" "^7.13.0" + "@babel/helper-define-polyfill-provider" "^0.1.5" + semver "^6.1.1" + +babel-plugin-polyfill-corejs3@^0.1.3: + version "0.1.7" + resolved "https://registry.yarnpkg.com/babel-plugin-polyfill-corejs3/-/babel-plugin-polyfill-corejs3-0.1.7.tgz#80449d9d6f2274912e05d9e182b54816904befd0" + integrity sha512-u+gbS9bbPhZWEeyy1oR/YaaSpod/KDT07arZHb80aTpl8H5ZBq+uN1nN9/xtX7jQyfLdPfoqI4Rue/MQSWJquw== + dependencies: + "@babel/helper-define-polyfill-provider" "^0.1.5" + core-js-compat "^3.8.1" + +babel-plugin-polyfill-regenerator@^0.1.2: + version "0.1.6" + resolved "https://registry.yarnpkg.com/babel-plugin-polyfill-regenerator/-/babel-plugin-polyfill-regenerator-0.1.6.tgz#0fe06a026fe0faa628ccc8ba3302da0a6ce02f3f" + integrity sha512-OUrYG9iKPKz8NxswXbRAdSwF0GhRdIEMTloQATJi4bDuFqrXaXcCUT/VGNrr8pBcjMh1RxZ7Xt9cytVJTJfvMg== + dependencies: + "@babel/helper-define-polyfill-provider" "^0.1.5" + +babel-plugin-syntax-trailing-function-commas@^7.0.0-beta.0: + version "7.0.0-beta.0" 
+ resolved "https://registry.yarnpkg.com/babel-plugin-syntax-trailing-function-commas/-/babel-plugin-syntax-trailing-function-commas-7.0.0-beta.0.tgz#aa213c1435e2bffeb6fca842287ef534ad05d5cf" + integrity sha512-Xj9XuRuz3nTSbaTXWv3itLOcxyF4oPD8douBBmj7U9BBC6nEBYfyOJYQMf/8PJAFotC62UY5dFfIGEPr7WswzQ== + +babel-polyfill@^6.26.0: + version "6.26.0" + resolved "https://registry.yarnpkg.com/babel-polyfill/-/babel-polyfill-6.26.0.tgz#379937abc67d7895970adc621f284cd966cf2153" + integrity sha1-N5k3q8Z9eJWXCtxiHyhM2WbPIVM= + dependencies: + babel-runtime "^6.26.0" + core-js "^2.5.0" + regenerator-runtime "^0.10.5" + +babel-preset-fbjs@^3.3.0: + version "3.3.0" + resolved "https://registry.yarnpkg.com/babel-preset-fbjs/-/babel-preset-fbjs-3.3.0.tgz#a6024764ea86c8e06a22d794ca8b69534d263541" + integrity sha512-7QTLTCd2gwB2qGoi5epSULMHugSVgpcVt5YAeiFO9ABLrutDQzKfGwzxgZHLpugq8qMdg/DhRZDZ5CLKxBkEbw== + dependencies: + "@babel/plugin-proposal-class-properties" "^7.0.0" + "@babel/plugin-proposal-object-rest-spread" "^7.0.0" + "@babel/plugin-syntax-class-properties" "^7.0.0" + "@babel/plugin-syntax-flow" "^7.0.0" + "@babel/plugin-syntax-jsx" "^7.0.0" + "@babel/plugin-syntax-object-rest-spread" "^7.0.0" + "@babel/plugin-transform-arrow-functions" "^7.0.0" + "@babel/plugin-transform-block-scoped-functions" "^7.0.0" + "@babel/plugin-transform-block-scoping" "^7.0.0" + "@babel/plugin-transform-classes" "^7.0.0" + "@babel/plugin-transform-computed-properties" "^7.0.0" + "@babel/plugin-transform-destructuring" "^7.0.0" + "@babel/plugin-transform-flow-strip-types" "^7.0.0" + "@babel/plugin-transform-for-of" "^7.0.0" + "@babel/plugin-transform-function-name" "^7.0.0" + "@babel/plugin-transform-literals" "^7.0.0" + "@babel/plugin-transform-member-expression-literals" "^7.0.0" + "@babel/plugin-transform-modules-commonjs" "^7.0.0" + "@babel/plugin-transform-object-super" "^7.0.0" + "@babel/plugin-transform-parameters" "^7.0.0" + "@babel/plugin-transform-property-literals" "^7.0.0" + "@babel/plugin-transform-react-display-name" "^7.0.0" + "@babel/plugin-transform-react-jsx" "^7.0.0" + "@babel/plugin-transform-shorthand-properties" "^7.0.0" + "@babel/plugin-transform-spread" "^7.0.0" + "@babel/plugin-transform-template-literals" "^7.0.0" + babel-plugin-syntax-trailing-function-commas "^7.0.0-beta.0" + +babel-register@^6.26.0: + version "6.26.0" + resolved "https://registry.yarnpkg.com/babel-register/-/babel-register-6.26.0.tgz#6ed021173e2fcb486d7acb45c6009a856f647071" + integrity sha1-btAhFz4vy0htestFxgCahW9kcHE= + dependencies: + babel-core "^6.26.0" + babel-runtime "^6.26.0" + core-js "^2.5.0" + home-or-tmp "^2.0.0" + lodash "^4.17.4" + mkdirp "^0.5.1" + source-map-support "^0.4.15" + +babel-runtime@^6.22.0, babel-runtime@^6.26.0: + version "6.26.0" + resolved "https://registry.yarnpkg.com/babel-runtime/-/babel-runtime-6.26.0.tgz#965c7058668e82b55d7bfe04ff2337bc8b5647fe" + integrity sha1-llxwWGaOgrVde/4E/yM3vItWR/4= + dependencies: + core-js "^2.4.0" + regenerator-runtime "^0.11.0" + +babel-template@^6.24.1, babel-template@^6.26.0: + version "6.26.0" + resolved "https://registry.yarnpkg.com/babel-template/-/babel-template-6.26.0.tgz#de03e2d16396b069f46dd9fff8521fb1a0e35e02" + integrity sha1-3gPi0WOWsGn0bdn/+FIfsaDjXgI= + dependencies: + babel-runtime "^6.26.0" + babel-traverse "^6.26.0" + babel-types "^6.26.0" + babylon "^6.18.0" + lodash "^4.17.4" + +babel-traverse@6.26.0, babel-traverse@^6.26.0: + version "6.26.0" + resolved 
"https://registry.yarnpkg.com/babel-traverse/-/babel-traverse-6.26.0.tgz#46a9cbd7edcc62c8e5c064e2d2d8d0f4035766ee" + integrity sha1-RqnL1+3MYsjlwGTi0tjQ9ANXZu4= + dependencies: + babel-code-frame "^6.26.0" + babel-messages "^6.23.0" + babel-runtime "^6.26.0" + babel-types "^6.26.0" + babylon "^6.18.0" + debug "^2.6.8" + globals "^9.18.0" + invariant "^2.2.2" + lodash "^4.17.4" + +babel-types@^6.26.0: + version "6.26.0" + resolved "https://registry.yarnpkg.com/babel-types/-/babel-types-6.26.0.tgz#a3b073f94ab49eb6fa55cd65227a334380632497" + integrity sha1-o7Bz+Uq0nrb6Vc1lInozQ4BjJJc= + dependencies: + babel-runtime "^6.26.0" + esutils "^2.0.2" + lodash "^4.17.4" + to-fast-properties "^1.0.3" + +babylon@6.18.0, babylon@^6.18.0: + version "6.18.0" + resolved "https://registry.yarnpkg.com/babylon/-/babylon-6.18.0.tgz#af2f3b88fa6f5c1e4c634d1a0f8eac4f55b395e3" + integrity sha512-q/UEjfGJ2Cm3oKV71DJz9d25TPnq5rhBVL2Q4fA5wcC3jcrdn7+SssEybFIxwAvvP+YCsCYNKughoF33GxgycQ== + +backo2@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/backo2/-/backo2-1.0.2.tgz#31ab1ac8b129363463e35b3ebb69f4dfcfba7947" + integrity sha1-MasayLEpNjRj41s+u2n038+6eUc= + +backoff@^2.5.0: + version "2.5.0" + resolved "https://registry.yarnpkg.com/backoff/-/backoff-2.5.0.tgz#f616eda9d3e4b66b8ca7fca79f695722c5f8e26f" + integrity sha1-9hbtqdPktmuMp/ynn2lXIsX44m8= + dependencies: + precond "0.2" + +balanced-match@^1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/balanced-match/-/balanced-match-1.0.2.tgz#e83e3a7e3f300b34cb9d87f615fa0cbf357690ee" + integrity sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw== + +base-x@^3.0.2, base-x@^3.0.8: + version "3.0.9" + resolved "https://registry.yarnpkg.com/base-x/-/base-x-3.0.9.tgz#6349aaabb58526332de9f60995e548a53fe21320" + integrity sha512-H7JU6iBHTal1gp56aKoaa//YUxEaAOUiydvrV/pILqIHXTtqxSkATOnDA2u+jZ/61sD+L/412+7kzXRtWukhpQ== + dependencies: + safe-buffer "^5.0.1" + +base64-js@^1.3.1: + version "1.5.1" + resolved "https://registry.yarnpkg.com/base64-js/-/base64-js-1.5.1.tgz#1b1b440160a5bf7ad40b650f095963481903930a" + integrity sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA== + +bcrypt-pbkdf@^1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.2.tgz#a4301d389b6a43f9b67ff3ca11a3f6637e360e9e" + integrity sha512-qeFIXtP4MSoi6NLqO12WfqARWWuCKi2Rn/9hJLEmtB5yTNr9DqFWkJRCf2qShWzPeAMRnOgCrq0sg/KLv5ES9w== + dependencies: + tweetnacl "^0.14.3" + +big.js@^5.2.2: + version "5.2.2" + resolved "https://registry.yarnpkg.com/big.js/-/big.js-5.2.2.tgz#65f0af382f578bcdc742bd9c281e9cb2d7768328" + integrity sha512-vyL2OymJxmarO8gxMr0mhChsO9QGwhynfuu4+MHTAW6czfq9humCB7rKpUjDd9YUiDPU4mzpyupFSvOClAwbmQ== + +bignumber.js@^7.2.1: + version "7.2.1" + resolved "https://registry.yarnpkg.com/bignumber.js/-/bignumber.js-7.2.1.tgz#80c048759d826800807c4bfd521e50edbba57a5f" + integrity sha512-S4XzBk5sMB+Rcb/LNcpzXr57VRTxgAvaAEDAl1AwRx27j00hT84O6OkteE7u8UB3NuaaygCRrEpqox4uDOrbdQ== + +bignumber.js@^9.0.0: + version "9.1.0" + resolved "https://registry.yarnpkg.com/bignumber.js/-/bignumber.js-9.1.0.tgz#8d340146107fe3a6cb8d40699643c302e8773b62" + integrity sha512-4LwHK4nfDOraBCtst+wOWIHbu1vhvAPJK8g8nROd4iuc3PSEjWif/qwbkh8jwCJz6yDBvtU4KPynETgrfh7y3A== + +bignumber.js@^9.0.1: + version "9.0.1" + resolved "https://registry.yarnpkg.com/bignumber.js/-/bignumber.js-9.0.1.tgz#8d7ba124c882bfd8e43260c67475518d0689e4e5" + integrity 
sha512-IdZR9mh6ahOBv/hYGiXyVuyCetmGJhtYkqLBpTStdhEGjegpPlUawydyaF3pbIOFynJTpllEs+NP+CS9jKFLjA== + +binary-extensions@^2.0.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/binary-extensions/-/binary-extensions-2.2.0.tgz#75f502eeaf9ffde42fc98829645be4ea76bd9e2d" + integrity sha512-jDctJ/IVQbZoJykoeHbhXpOlNBqGNcwXJKJog42E5HDPUwQTSdjCHdihjj0DlnheQ7blbT6dHOafNAiS8ooQKA== + +binary-install-raw@0.0.13: + version "0.0.13" + resolved "https://registry.yarnpkg.com/binary-install-raw/-/binary-install-raw-0.0.13.tgz#43a13c6980eb9844e2932eb7a91a56254f55b7dd" + integrity sha512-v7ms6N/H7iciuk6QInon3/n2mu7oRX+6knJ9xFPsJ3rQePgAqcR3CRTwUheFd8SLbiq4LL7Z4G/44L9zscdt9A== + dependencies: + axios "^0.21.1" + rimraf "^3.0.2" + tar "^6.1.0" + +binaryen@101.0.0-nightly.20210723: + version "101.0.0-nightly.20210723" + resolved "https://registry.yarnpkg.com/binaryen/-/binaryen-101.0.0-nightly.20210723.tgz#b6bb7f3501341727681a03866c0856500eec3740" + integrity sha512-eioJNqhHlkguVSbblHOtLqlhtC882SOEPKmNFZaDuz1hzQjolxZ+eu3/kaS10n3sGPONsIZsO7R9fR00UyhEUA== + +bindings@^1.5.0: + version "1.5.0" + resolved "https://registry.yarnpkg.com/bindings/-/bindings-1.5.0.tgz#10353c9e945334bc0511a6d90b38fbc7c9c504df" + integrity sha512-p2q/t/mhvuOj/UeLlV6566GD/guowlr0hHxClI0W9m7MWYkL1F0hLo+0Aexs9HSPCtR1SXQ0TD3MMKrXZajbiQ== + dependencies: + file-uri-to-path "1.0.0" + +bip66@^1.1.5: + version "1.1.5" + resolved "https://registry.yarnpkg.com/bip66/-/bip66-1.1.5.tgz#01fa8748785ca70955d5011217d1b3139969ca22" + integrity sha512-nemMHz95EmS38a26XbbdxIYj5csHd3RMP3H5bwQknX0WYHF01qhpufP42mLOwVICuH2JmhIhXiWs89MfUGL7Xw== + dependencies: + safe-buffer "^5.0.1" + +bl@^1.0.0: + version "1.2.3" + resolved "https://registry.yarnpkg.com/bl/-/bl-1.2.3.tgz#1e8dd80142eac80d7158c9dccc047fb620e035e7" + integrity sha512-pvcNpa0UU69UT341rO6AYy4FVAIkUHuZXRIWbq+zHnsVcRzDDjIAhGuuYoi0d//cwIwtt4pkpKycWEfjdV+vww== + dependencies: + readable-stream "^2.3.5" + safe-buffer "^5.1.1" + +bl@^3.0.0: + version "3.0.1" + resolved "https://registry.yarnpkg.com/bl/-/bl-3.0.1.tgz#1cbb439299609e419b5a74d7fce2f8b37d8e5c6f" + integrity sha512-jrCW5ZhfQ/Vt07WX1Ngs+yn9BDqPL/gw28S7s9H6QK/gupnizNzJAss5akW20ISgOrbLTlXOOCTJeNUQqruAWQ== + dependencies: + readable-stream "^3.0.1" + +bl@^4.0.3: + version "4.1.0" + resolved "https://registry.yarnpkg.com/bl/-/bl-4.1.0.tgz#451535264182bec2fbbc83a62ab98cf11d9f7b3a" + integrity sha512-1W07cM9gS6DcLperZfFSj+bWLtaPGSOHWhPiGzXmvVJbRLdG82sH/Kn8EtW1VqWVA54AKf2h5k5BbnIbwF3h6w== + dependencies: + buffer "^5.5.0" + inherits "^2.0.4" + readable-stream "^3.4.0" + +blakejs@^1.1.0: + version "1.2.1" + resolved "https://registry.yarnpkg.com/blakejs/-/blakejs-1.2.1.tgz#5057e4206eadb4a97f7c0b6e197a505042fc3814" + integrity sha512-QXUSXI3QVc/gJME0dBpXrag1kbzOqCjCX8/b54ntNyW6sjtoqxqRk3LTmXzaJoh71zMsDCjM+47jS7XiwN/+fQ== + +bluebird@^3.4.7, bluebird@^3.5.0: + version "3.7.2" + resolved "https://registry.yarnpkg.com/bluebird/-/bluebird-3.7.2.tgz#9f229c15be272454ffa973ace0dbee79a1b0c36f" + integrity sha512-XpNj6GDQzdfW+r2Wnn7xiSAd7TM3jzkxGXBGTtWKuSXv1xUV+azxAm8jdWZN06QTQk+2N2XB9jRDkvbmQmcRtg== + +bn.js@4.11.6: + version "4.11.6" + resolved "https://registry.yarnpkg.com/bn.js/-/bn.js-4.11.6.tgz#53344adb14617a13f6e8dd2ce28905d1c0ba3215" + integrity sha512-XWwnNNFCuuSQ0m3r3C4LE3EiORltHd9M05pq6FOlVeiophzRbMo50Sbz1ehl8K3Z+jw9+vmgnXefY1hz8X+2wA== + +bn.js@4.11.8: + version "4.11.8" + resolved "https://registry.yarnpkg.com/bn.js/-/bn.js-4.11.8.tgz#2cde09eb5ee341f484746bb0309b3253b1b1442f" + integrity 
sha512-ItfYfPLkWHUjckQCk8xC+LwxgK8NYcXywGigJgSwOP8Y2iyWT4f2vsZnoOXTTbo+o5yXmIUJ4gn5538SO5S3gA== + +bn.js@^4.0.0, bn.js@^4.1.0, bn.js@^4.11.0, bn.js@^4.11.1, bn.js@^4.11.6, bn.js@^4.11.8, bn.js@^4.11.9, bn.js@^4.4.0: + version "4.12.0" + resolved "https://registry.yarnpkg.com/bn.js/-/bn.js-4.12.0.tgz#775b3f278efbb9718eec7361f483fb36fbbfea88" + integrity sha512-c98Bf3tPniI+scsdk237ku1Dc3ujXQTSgyiPUDEOe7tRkhrqridvh8klBv0HCEso1OLOYcHuCv/cS6DNxKH+ZA== + +bn.js@^5.0.0, bn.js@^5.1.1, bn.js@^5.1.3: + version "5.2.0" + resolved "https://registry.yarnpkg.com/bn.js/-/bn.js-5.2.0.tgz#358860674396c6997771a9d051fcc1b57d4ae002" + integrity sha512-D7iWRBvnZE8ecXiLj/9wbxH7Tk79fAh8IHaTNq1RWRixsS02W+5qS+iE9yq6RYl0asXx5tw0bLhmT5pIfbSquw== + +bn.js@^5.1.2, bn.js@^5.2.0, bn.js@^5.2.1: + version "5.2.1" + resolved "https://registry.yarnpkg.com/bn.js/-/bn.js-5.2.1.tgz#0bc527a6a0d18d0aa8d5b0538ce4a77dccfa7b70" + integrity sha512-eXRvHzWyYPBuB4NBy0cmYQjGitUrtqwbvlzP3G6VFnNRbsZQIxQ10PbKKHt8gZ/HW/D/747aDl+QkDqg3KQLMQ== + +body-parser@1.20.1, body-parser@^1.16.0, body-parser@^1.18.3: + version "1.20.1" + resolved "https://registry.yarnpkg.com/body-parser/-/body-parser-1.20.1.tgz#b1812a8912c195cd371a3ee5e66faa2338a5c668" + integrity sha512-jWi7abTbYwajOytWCQc37VulmWiRae5RyTpaCyDcS5/lMdtwSz5lOpDE67srw/HYe35f1z3fDQw+3txg7gNtWw== + dependencies: + bytes "3.1.2" + content-type "~1.0.4" + debug "2.6.9" + depd "2.0.0" + destroy "1.2.0" + http-errors "2.0.0" + iconv-lite "0.4.24" + on-finished "2.4.1" + qs "6.11.0" + raw-body "2.5.1" + type-is "~1.6.18" + unpipe "1.0.0" + +boolbase@^1.0.0, boolbase@~1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/boolbase/-/boolbase-1.0.0.tgz#68dff5fbe60c51eb37725ea9e3ed310dcc1e776e" + integrity sha1-aN/1++YMUes3cl6p4+0xDcwed24= + +borc@^2.1.2: + version "2.1.2" + resolved "https://registry.yarnpkg.com/borc/-/borc-2.1.2.tgz#6ce75e7da5ce711b963755117dd1b187f6f8cf19" + integrity sha512-Sy9eoUi4OiKzq7VovMn246iTo17kzuyHJKomCfpWMlI6RpfN1gk95w7d7gH264nApVLg0HZfcpz62/g4VH1Y4w== + dependencies: + bignumber.js "^9.0.0" + buffer "^5.5.0" + commander "^2.15.0" + ieee754 "^1.1.13" + iso-url "~0.4.7" + json-text-sequence "~0.1.0" + readable-stream "^3.6.0" + +brace-expansion@^1.1.7: + version "1.1.11" + resolved "https://registry.yarnpkg.com/brace-expansion/-/brace-expansion-1.1.11.tgz#3c7fcbf529d87226f3d2f52b966ff5271eb441dd" + integrity sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA== + dependencies: + balanced-match "^1.0.0" + concat-map "0.0.1" + +braces@^1.8.2: + version "1.8.5" + resolved "https://registry.yarnpkg.com/braces/-/braces-1.8.5.tgz#ba77962e12dff969d6b76711e914b737857bf6a7" + integrity sha1-uneWLhLf+WnWt2cR6RS3N4V79qc= + dependencies: + expand-range "^1.8.1" + preserve "^0.2.0" + repeat-element "^1.1.2" + +braces@^3.0.1, braces@~3.0.2: + version "3.0.2" + resolved "https://registry.yarnpkg.com/braces/-/braces-3.0.2.tgz#3454e1a462ee8d599e236df336cd9ea4f8afe107" + integrity sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A== + dependencies: + fill-range "^7.0.1" + +brorand@^1.0.1, brorand@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/brorand/-/brorand-1.1.0.tgz#12c25efe40a45e3c323eb8675a0a0ce57b22371f" + integrity sha1-EsJe/kCkXjwyPrhnWgoM5XsiNx8= + +browser-stdout@1.3.1: + version "1.3.1" + resolved "https://registry.yarnpkg.com/browser-stdout/-/browser-stdout-1.3.1.tgz#baa559ee14ced73452229bad7326467c61fabd60" + integrity 
sha512-qhAVI1+Av2X7qelOfAIYwXONood6XlZE/fXaBSmW/T5SzLAmCgzi+eiWE7fUvbHaeNBQH13UftjpXxsfLkMpgw== + +browserify-aes@^1.0.0, browserify-aes@^1.0.4, browserify-aes@^1.0.6, browserify-aes@^1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/browserify-aes/-/browserify-aes-1.2.0.tgz#326734642f403dabc3003209853bb70ad428ef48" + integrity sha512-+7CHXqGuspUn/Sl5aO7Ea0xWGAtETPXNSAjHo48JfLdPWcMng33Xe4znFvQweqc/uzk5zSOI3H52CYnjCfb5hA== + dependencies: + buffer-xor "^1.0.3" + cipher-base "^1.0.0" + create-hash "^1.1.0" + evp_bytestokey "^1.0.3" + inherits "^2.0.1" + safe-buffer "^5.0.1" + +browserify-cipher@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/browserify-cipher/-/browserify-cipher-1.0.1.tgz#8d6474c1b870bfdabcd3bcfcc1934a10e94f15f0" + integrity sha512-sPhkz0ARKbf4rRQt2hTpAHqn47X3llLkUGn+xEJzLjwY8LRs2p0v7ljvI5EyoRO/mexrNunNECisZs+gw2zz1w== + dependencies: + browserify-aes "^1.0.4" + browserify-des "^1.0.0" + evp_bytestokey "^1.0.0" + +browserify-des@^1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/browserify-des/-/browserify-des-1.0.2.tgz#3af4f1f59839403572f1c66204375f7a7f703e9c" + integrity sha512-BioO1xf3hFwz4kc6iBhI3ieDFompMhrMlnDFC4/0/vd5MokpuAc3R+LYbwTA9A5Yc9pq9UYPqffKpW2ObuwX5A== + dependencies: + cipher-base "^1.0.1" + des.js "^1.0.0" + inherits "^2.0.1" + safe-buffer "^5.1.2" + +browserify-rsa@^4.0.0, browserify-rsa@^4.0.1: + version "4.1.0" + resolved "https://registry.yarnpkg.com/browserify-rsa/-/browserify-rsa-4.1.0.tgz#b2fd06b5b75ae297f7ce2dc651f918f5be158c8d" + integrity sha512-AdEER0Hkspgno2aR97SAf6vi0y0k8NuOpGnVH3O99rcA5Q6sh8QxcngtHuJ6uXwnfAXNM4Gn1Gb7/MV1+Ymbog== + dependencies: + bn.js "^5.0.0" + randombytes "^2.0.1" + +browserify-sign@^4.0.0: + version "4.2.1" + resolved "https://registry.yarnpkg.com/browserify-sign/-/browserify-sign-4.2.1.tgz#eaf4add46dd54be3bb3b36c0cf15abbeba7956c3" + integrity sha512-/vrA5fguVAKKAVTNJjgSm1tRQDHUU6DbwO9IROu/0WAzC8PKhucDSh18J0RMvVeHAn5puMd+QHC2erPRNf8lmg== + dependencies: + bn.js "^5.1.1" + browserify-rsa "^4.0.1" + create-hash "^1.2.0" + create-hmac "^1.1.7" + elliptic "^6.5.3" + inherits "^2.0.4" + parse-asn1 "^5.1.5" + readable-stream "^3.6.0" + safe-buffer "^5.2.0" + +browserslist@^4.14.5, browserslist@^4.16.3: + version "4.16.3" + resolved "https://registry.yarnpkg.com/browserslist/-/browserslist-4.16.3.tgz#340aa46940d7db878748567c5dea24a48ddf3717" + integrity sha512-vIyhWmIkULaq04Gt93txdh+j02yX/JzlyhLYbV3YQCn/zvES3JnY7TifHHvvr1w5hTDluNKMkV05cs4vy8Q7sw== + dependencies: + caniuse-lite "^1.0.30001181" + colorette "^1.2.1" + electron-to-chromium "^1.3.649" + escalade "^3.1.1" + node-releases "^1.1.70" + +bs58@^4.0.0, bs58@^4.0.1: + version "4.0.1" + resolved "https://registry.yarnpkg.com/bs58/-/bs58-4.0.1.tgz#be161e76c354f6f788ae4071f63f34e8c4f0a42a" + integrity sha512-Ok3Wdf5vOIlBrgCvTq96gBkJw+JUEzdBgyaza5HLtPm7yTHkjRy8+JzNyHF7BHa0bNWOQIp3m5YF0nnFcOIKLw== + dependencies: + base-x "^3.0.2" + +bs58check@^2.1.2: + version "2.1.2" + resolved "https://registry.yarnpkg.com/bs58check/-/bs58check-2.1.2.tgz#53b018291228d82a5aa08e7d796fdafda54aebfc" + integrity sha512-0TS1jicxdU09dwJMNZtVAfzPi6Q6QeN0pM1Fkzrjn+XYHvzMKPU3pHVpva+769iNVSfIYWf7LJ6WR+BuuMf8cA== + dependencies: + bs58 "^4.0.0" + create-hash "^1.1.0" + safe-buffer "^5.1.2" + +bser@2.1.1: + version "2.1.1" + resolved "https://registry.yarnpkg.com/bser/-/bser-2.1.1.tgz#e6787da20ece9d07998533cfd9de6f5c38f4bc05" + integrity sha512-gQxTNE/GAfIIrmHLUE3oJyp5FO6HRBfhjnw4/wMmA63ZGDJnWBmgY/lyQBpnDUkGmAhbSe39tx2d/iTOAfglwQ== + dependencies: + 
node-int64 "^0.4.0" + +btoa@^1.2.1: + version "1.2.1" + resolved "https://registry.yarnpkg.com/btoa/-/btoa-1.2.1.tgz#01a9909f8b2c93f6bf680ba26131eb30f7fa3d73" + integrity sha512-SB4/MIGlsiVkMcHmT+pSmIPoNDoHg+7cMzmt3Uxt628MTz2487DKSqK/fuhFBrkuqrYv5UCEnACpF4dTFNKc/g== + +buffer-alloc-unsafe@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/buffer-alloc-unsafe/-/buffer-alloc-unsafe-1.1.0.tgz#bd7dc26ae2972d0eda253be061dba992349c19f0" + integrity sha512-TEM2iMIEQdJ2yjPJoSIsldnleVaAk1oW3DBVUykyOLsEsFmEc9kn+SFFPz+gl54KQNxlDnAwCXosOS9Okx2xAg== + +buffer-alloc@^1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/buffer-alloc/-/buffer-alloc-1.2.0.tgz#890dd90d923a873e08e10e5fd51a57e5b7cce0ec" + integrity sha512-CFsHQgjtW1UChdXgbyJGtnm+O/uLQeZdtbDo8mfUgYXCHSM1wgrVxXm6bSyrUuErEb+4sYVGCzASBRot7zyrow== + dependencies: + buffer-alloc-unsafe "^1.1.0" + buffer-fill "^1.0.0" + +buffer-fill@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/buffer-fill/-/buffer-fill-1.0.0.tgz#f8f78b76789888ef39f205cd637f68e702122b2c" + integrity sha512-T7zexNBwiiaCOGDg9xNX9PBmjrubblRkENuptryuI64URkXDFum9il/JGL8Lm8wYfAXpredVXXZz7eMHilimiQ== + +buffer-from@1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/buffer-from/-/buffer-from-1.1.0.tgz#87fcaa3a298358e0ade6e442cfce840740d1ad04" + integrity sha512-c5mRlguI/Pe2dSZmpER62rSCu0ryKmWddzRYsuXc50U2/g8jMOulc31VZMa4mYx31U5xsmSOpDCgH88Vl9cDGQ== + +buffer-from@1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/buffer-from/-/buffer-from-1.1.1.tgz#32713bc028f75c02fdb710d7c7bcec1f2c6070ef" + integrity sha512-MQcXEUbCKtEo7bhqEs6560Hyd4XaovZlO/k9V3hjVUF/zwW7KBVdSK4gIt/bzwS9MbR5qob+F5jusZsb0YQK2A== + +buffer-from@^1.0.0: + version "1.1.2" + resolved "https://registry.yarnpkg.com/buffer-from/-/buffer-from-1.1.2.tgz#2b146a6fd72e80b4f55d255f35ed59a3a9a41bd5" + integrity sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ== + +buffer-to-arraybuffer@^0.0.5: + version "0.0.5" + resolved "https://registry.yarnpkg.com/buffer-to-arraybuffer/-/buffer-to-arraybuffer-0.0.5.tgz#6064a40fa76eb43c723aba9ef8f6e1216d10511a" + integrity sha1-YGSkD6dutDxyOrqe+PbhIW0QURo= + +buffer-xor@^1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/buffer-xor/-/buffer-xor-1.0.3.tgz#26e61ed1422fb70dd42e6e36729ed51d855fe8d9" + integrity sha512-571s0T7nZWK6vB67HI5dyUF7wXiNcfaPPPTl6zYCNApANjIvYJTg7hlud/+cJpdAhS7dVzqMLmfhfHR3rAcOjQ== + +buffer@^5.0.5, buffer@^5.2.1, buffer@^5.4.2, buffer@^5.4.3, buffer@^5.5.0, buffer@^5.6.0, buffer@^5.7.0: + version "5.7.1" + resolved "https://registry.yarnpkg.com/buffer/-/buffer-5.7.1.tgz#ba62e7c13133053582197160851a8f648e99eed0" + integrity sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ== + dependencies: + base64-js "^1.3.1" + ieee754 "^1.1.13" + +buffer@^6.0.3: + version "6.0.3" + resolved "https://registry.yarnpkg.com/buffer/-/buffer-6.0.3.tgz#2ace578459cc8fbe2a70aaa8f52ee63b6a74c6c6" + integrity sha512-FTiCpNxtwiZZHEZbcbTIcZjERVICn9yq/pDFkTl95/AxzD1naBctN7YO68riM/gLSDY7sdrMby8hofADYuuqOA== + dependencies: + base64-js "^1.3.1" + ieee754 "^1.2.1" + +bufferutil@^4.0.1: + version "4.0.3" + resolved "https://registry.yarnpkg.com/bufferutil/-/bufferutil-4.0.3.tgz#66724b756bed23cd7c28c4d306d7994f9943cc6b" + integrity sha512-yEYTwGndELGvfXsImMBLop58eaGW+YdONi1fNjTINSY98tmMmFijBG6WXgdkfuLNt4imzQNtIE+eBp1PVpMCSw== + dependencies: + node-gyp-build "^4.2.0" + +builtin-status-codes@^3.0.0: + version "3.0.0" + resolved 
"https://registry.yarnpkg.com/builtin-status-codes/-/builtin-status-codes-3.0.0.tgz#85982878e21b98e1c66425e03d0174788f569ee8" + integrity sha512-HpGFw18DgFWlncDfjTa2rcQ4W88O1mC8e8yZ2AvQY5KDaktSTwo+KRf6nHK6FRI5FyRyb/5T6+TSxfP7QyGsmQ== + +busboy@^0.3.1: + version "0.3.1" + resolved "https://registry.yarnpkg.com/busboy/-/busboy-0.3.1.tgz#170899274c5bf38aae27d5c62b71268cd585fd1b" + integrity sha512-y7tTxhGKXcyBxRKAni+awqx8uqaJKrSFSNFSeRG5CsWNdmy2BIK+6VGWEW7TZnIO/533mtMEA4rOevQV815YJw== + dependencies: + dicer "0.3.0" + +bytes@3.1.2: + version "3.1.2" + resolved "https://registry.yarnpkg.com/bytes/-/bytes-3.1.2.tgz#8b0beeb98605adf1b128fa4386403c009e0221a5" + integrity sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg== + +cacheable-request@^6.0.0: + version "6.1.0" + resolved "https://registry.yarnpkg.com/cacheable-request/-/cacheable-request-6.1.0.tgz#20ffb8bd162ba4be11e9567d823db651052ca912" + integrity sha512-Oj3cAGPCqOZX7Rz64Uny2GYAZNliQSqfbePrgAQ1wKAihYmCUnraBtJtKcGR4xz7wF+LoJC+ssFZvv5BgF9Igg== + dependencies: + clone-response "^1.0.2" + get-stream "^5.1.0" + http-cache-semantics "^4.0.0" + keyv "^3.0.0" + lowercase-keys "^2.0.0" + normalize-url "^4.1.0" + responselike "^1.0.2" + +call-bind@^1.0.0, call-bind@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/call-bind/-/call-bind-1.0.2.tgz#b1d4e89e688119c3c9a903ad30abb2f6a919be3c" + integrity sha512-7O+FbCihrB5WGbFYesctwmTKae6rOiIzmz1icreWJ+0aA7LJfuqhEso2T9ncpcFtzMQtzXf2QGGueWJGTYsqrA== + dependencies: + function-bind "^1.1.1" + get-intrinsic "^1.0.2" + +callsites@^3.0.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/callsites/-/callsites-3.1.0.tgz#b3630abd8943432f54b3f0519238e33cd7df2f73" + integrity sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ== + +camel-case@4.1.1: + version "4.1.1" + resolved "https://registry.yarnpkg.com/camel-case/-/camel-case-4.1.1.tgz#1fc41c854f00e2f7d0139dfeba1542d6896fe547" + integrity sha512-7fa2WcG4fYFkclIvEmxBbTvmibwF2/agfEBc6q3lOpVu0A13ltLsA+Hr/8Hp6kp5f+G7hKi6t8lys6XxP+1K6Q== + dependencies: + pascal-case "^3.1.1" + tslib "^1.10.0" + +camel-case@4.1.2: + version "4.1.2" + resolved "https://registry.yarnpkg.com/camel-case/-/camel-case-4.1.2.tgz#9728072a954f805228225a6deea6b38461e1bd5a" + integrity sha512-gxGWBrTT1JuMx6R+o5PTXMmUnhnVzLQ9SNutD4YqKtI6ap897t3tKECYla6gCWEkplXnlNybEkZg9GEGxKFCgw== + dependencies: + pascal-case "^3.1.2" + tslib "^2.0.3" + +camel-case@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/camel-case/-/camel-case-3.0.0.tgz#ca3c3688a4e9cf3a4cda777dc4dcbc713249cf73" + integrity sha1-yjw2iKTpzzpM2nd9xNy8cTJJz3M= + dependencies: + no-case "^2.2.0" + upper-case "^1.1.1" + +camelcase@^2.0.1: + version "2.1.1" + resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-2.1.1.tgz#7c1d16d679a1bbe59ca02cacecfb011e201f5a1f" + integrity sha1-fB0W1nmhu+WcoCys7PsBHiAfWh8= + +camelcase@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-3.0.0.tgz#32fc4b9fcdaf845fcdf7e73bb97cac2261f0ab0a" + integrity sha1-MvxLn82vhF/N9+c7uXysImHwqwo= + +camelcase@^4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-4.1.0.tgz#d545635be1e33c542649c69173e5de6acfae34dd" + integrity sha1-1UVjW+HjPFQmScaRc+Xeas+uNN0= + +camelcase@^5.0.0, camelcase@^5.3.1: + version "5.3.1" + resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-5.3.1.tgz#e3c9b31569e106811df242f715725a1f4c494320" + integrity 
sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg== + +caniuse-lite@^1.0.30001181: + version "1.0.30001197" + resolved "https://registry.yarnpkg.com/caniuse-lite/-/caniuse-lite-1.0.30001197.tgz#47ad15b977d2f32b3ec2fe2b087e0c50443771db" + integrity sha512-8aE+sqBqtXz4G8g35Eg/XEaFr2N7rd/VQ6eABGBmNtcB8cN6qNJhMi6oSFy4UWWZgqgL3filHT8Nha4meu3tsw== + +caseless@^0.12.0, caseless@~0.12.0: + version "0.12.0" + resolved "https://registry.yarnpkg.com/caseless/-/caseless-0.12.0.tgz#1b681c21ff84033c826543090689420d187151dc" + integrity sha512-4tYFyifaFfGacoiObjJegolkwSU4xQNGbVgUiNYVUxbQ2x2lUsFvY4hVgVzGiIe6WLOPqycWXA40l+PWsxthUw== + +cbor@^5.1.0: + version "5.2.0" + resolved "https://registry.yarnpkg.com/cbor/-/cbor-5.2.0.tgz#4cca67783ccd6de7b50ab4ed62636712f287a67c" + integrity sha512-5IMhi9e1QU76ppa5/ajP1BmMWZ2FHkhAhjeVKQ/EFCgYSEaeVaoGtL7cxJskf9oCCk+XjzaIdc3IuU/dbA/o2A== + dependencies: + bignumber.js "^9.0.1" + nofilter "^1.0.4" + +chalk@1.1.3, chalk@^1.1.3: + version "1.1.3" + resolved "https://registry.yarnpkg.com/chalk/-/chalk-1.1.3.tgz#a8115c55e4a702fe4d150abd3872822a7e09fc98" + integrity sha1-qBFcVeSnAv5NFQq9OHKCKn4J/Jg= + dependencies: + ansi-styles "^2.2.1" + escape-string-regexp "^1.0.2" + has-ansi "^2.0.0" + strip-ansi "^3.0.0" + supports-color "^2.0.0" + +chalk@3.0.0, chalk@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/chalk/-/chalk-3.0.0.tgz#3f73c2bf526591f574cc492c51e2456349f844e4" + integrity sha512-4D3B6Wf41KOYRFdszmDqMCGq5VV/uMAB273JILmO+3jAlh8X4qDtdtgCR3fxtbLEMzSx22QdhnDcJvu2u1fVwg== + dependencies: + ansi-styles "^4.1.0" + supports-color "^7.1.0" + +chalk@^2.0.0, chalk@^2.0.1, chalk@^2.3.2, chalk@^2.4.2: + version "2.4.2" + resolved "https://registry.yarnpkg.com/chalk/-/chalk-2.4.2.tgz#cd42541677a54333cf541a49108c1432b44c9424" + integrity sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ== + dependencies: + ansi-styles "^3.2.1" + escape-string-regexp "^1.0.5" + supports-color "^5.3.0" + +chalk@^4.0.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/chalk/-/chalk-4.1.0.tgz#4e14870a618d9e2edd97dd8345fd9d9dc315646a" + integrity sha512-qwx12AxXe2Q5xQ43Ac//I6v5aXTipYrSESdOgzrN+9XjgEpyjpKuvSGaN4qE93f7TQTlerQQ8S+EQ0EyDoVL1A== + dependencies: + ansi-styles "^4.1.0" + supports-color "^7.1.0" + +change-case@3.0.2: + version "3.0.2" + resolved "https://registry.yarnpkg.com/change-case/-/change-case-3.0.2.tgz#fd48746cce02f03f0a672577d1d3a8dc2eceb037" + integrity sha512-Mww+SLF6MZ0U6kdg11algyKd5BARbyM4TbFBepwowYSR5ClfQGCGtxNXgykpN0uF/bstWeaGDT4JWaDh8zWAHA== + dependencies: + camel-case "^3.0.0" + constant-case "^2.0.0" + dot-case "^2.1.0" + header-case "^1.0.0" + is-lower-case "^1.1.0" + is-upper-case "^1.1.0" + lower-case "^1.1.1" + lower-case-first "^1.0.0" + no-case "^2.3.2" + param-case "^2.1.0" + pascal-case "^2.0.0" + path-case "^2.1.0" + sentence-case "^2.1.0" + snake-case "^2.1.0" + swap-case "^1.1.0" + title-case "^2.1.0" + upper-case "^1.1.1" + upper-case-first "^1.1.0" + +checkpoint-store@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/checkpoint-store/-/checkpoint-store-1.1.0.tgz#04e4cb516b91433893581e6d4601a78e9552ea06" + integrity sha1-BOTLUWuRQziTWB5tRgGnjpVS6gY= + dependencies: + functional-red-black-tree "^1.0.1" + +cheerio-select-tmp@^0.1.0: + version "0.1.1" + resolved "https://registry.yarnpkg.com/cheerio-select-tmp/-/cheerio-select-tmp-0.1.1.tgz#55bbef02a4771710195ad736d5e346763ca4e646" + integrity 
sha512-YYs5JvbpU19VYJyj+F7oYrIE2BOll1/hRU7rEy/5+v9BzkSo3bK81iAeeQEMI92vRIxz677m72UmJUiVwwgjfQ== + dependencies: + css-select "^3.1.2" + css-what "^4.0.0" + domelementtype "^2.1.0" + domhandler "^4.0.0" + domutils "^2.4.4" + +cheerio@0.20.0: + version "0.20.0" + resolved "https://registry.yarnpkg.com/cheerio/-/cheerio-0.20.0.tgz#5c710f2bab95653272842ba01c6ea61b3545ec35" + integrity sha1-XHEPK6uVZTJyhCugHG6mGzVF7DU= + dependencies: + css-select "~1.2.0" + dom-serializer "~0.1.0" + entities "~1.1.1" + htmlparser2 "~3.8.1" + lodash "^4.1.0" + optionalDependencies: + jsdom "^7.0.2" + +cheerio@1.0.0-rc.2: + version "1.0.0-rc.2" + resolved "https://registry.yarnpkg.com/cheerio/-/cheerio-1.0.0-rc.2.tgz#4b9f53a81b27e4d5dac31c0ffd0cfa03cc6830db" + integrity sha1-S59TqBsn5NXawxwP/Qz6A8xoMNs= + dependencies: + css-select "~1.2.0" + dom-serializer "~0.1.0" + entities "~1.1.1" + htmlparser2 "^3.9.1" + lodash "^4.15.0" + parse5 "^3.0.1" + +cheerio@^1.0.0-rc.2: + version "1.0.0-rc.5" + resolved "https://registry.yarnpkg.com/cheerio/-/cheerio-1.0.0-rc.5.tgz#88907e1828674e8f9fee375188b27dadd4f0fa2f" + integrity sha512-yoqps/VCaZgN4pfXtenwHROTp8NG6/Hlt4Jpz2FEP0ZJQ+ZUkVDd0hAPDNKhj3nakpfPt/CNs57yEtxD1bXQiw== + dependencies: + cheerio-select-tmp "^0.1.0" + dom-serializer "~1.2.0" + domhandler "^4.0.0" + entities "~2.1.0" + htmlparser2 "^6.0.0" + parse5 "^6.0.0" + parse5-htmlparser2-tree-adapter "^6.0.0" + +chokidar@3.4.2: + version "3.4.2" + resolved "https://registry.yarnpkg.com/chokidar/-/chokidar-3.4.2.tgz#38dc8e658dec3809741eb3ef7bb0a47fe424232d" + integrity sha512-IZHaDeBeI+sZJRX7lGcXsdzgvZqKv6sECqsbErJA4mHWfpRrD8B97kSFN4cQz6nGBGiuFia1MKR4d6c1o8Cv7A== + dependencies: + anymatch "~3.1.1" + braces "~3.0.2" + glob-parent "~5.1.0" + is-binary-path "~2.1.0" + is-glob "~4.0.1" + normalize-path "~3.0.0" + readdirp "~3.4.0" + optionalDependencies: + fsevents "~2.1.2" + +chokidar@3.5.1: + version "3.5.1" + resolved "https://registry.yarnpkg.com/chokidar/-/chokidar-3.5.1.tgz#ee9ce7bbebd2b79f49f304799d5468e31e14e68a" + integrity sha512-9+s+Od+W0VJJzawDma/gvBNQqkTiqYTWLuZoyAsivsI4AaWTCzHG06/TMjsf1cYe9Cb97UCEhjz7HvnPk2p/tw== + dependencies: + anymatch "~3.1.1" + braces "~3.0.2" + glob-parent "~5.1.0" + is-binary-path "~2.1.0" + is-glob "~4.0.1" + normalize-path "~3.0.0" + readdirp "~3.5.0" + optionalDependencies: + fsevents "~2.3.1" + +chownr@^1.0.1, chownr@^1.1.4: + version "1.1.4" + resolved "https://registry.yarnpkg.com/chownr/-/chownr-1.1.4.tgz#6fc9d7b42d32a583596337666e7d08084da2cc6b" + integrity sha512-jJ0bqzaylmJtVnNgzTeSOs8DPavpbYgEr/b0YL8/2GO3xJEhInFmhKMUnEJQjZumK7KXGFhUy89PrsJWlakBVg== + +chownr@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/chownr/-/chownr-2.0.0.tgz#15bfbe53d2eab4cf70f18a8cd68ebe5b3cb1dece" + integrity sha512-bIomtDF5KGpdogkLd9VspvFzk9KfpyyGlS8YFVZl7TGPBHL5snIOnxeshwVgPteQ9b4Eydl+pVbIyE1DcvCWgQ== + +cids@^0.7.1, cids@~0.7.0, cids@~0.7.1: + version "0.7.5" + resolved "https://registry.yarnpkg.com/cids/-/cids-0.7.5.tgz#60a08138a99bfb69b6be4ceb63bfef7a396b28b2" + integrity sha512-zT7mPeghoWAu+ppn8+BS1tQ5qGmbMfB4AregnQjA/qHY3GC1m1ptI9GkWNlgeu38r7CuRdXB47uY2XgAYt6QVA== + dependencies: + buffer "^5.5.0" + class-is "^1.1.0" + multibase "~0.6.0" + multicodec "^1.0.0" + multihashes "~0.4.15" + +cids@~0.8.0: + version "0.8.3" + resolved "https://registry.yarnpkg.com/cids/-/cids-0.8.3.tgz#aaf48ac8ed857c3d37dad94d8db1d8c9407b92db" + integrity sha512-yoXTbV3llpm+EBGWKeL9xKtksPE/s6DPoDSY4fn8I8TEW1zehWXPSB0pwAXVDlLaOlrw+sNynj995uD9abmPhA== + dependencies: + buffer "^5.6.0" + class-is 
"^1.1.0" + multibase "^1.0.0" + multicodec "^1.0.1" + multihashes "^1.0.1" + +cipher-base@^1.0.0, cipher-base@^1.0.1, cipher-base@^1.0.3: + version "1.0.4" + resolved "https://registry.yarnpkg.com/cipher-base/-/cipher-base-1.0.4.tgz#8760e4ecc272f4c363532f926d874aae2c1397de" + integrity sha512-Kkht5ye6ZGmwv40uUDZztayT2ThLQGfnj/T71N/XzeZeo3nf8foyW7zGTsPYkEya3m5f3cAypH+qe7YOrM1U2Q== + dependencies: + inherits "^2.0.1" + safe-buffer "^5.0.1" + +class-is@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/class-is/-/class-is-1.1.0.tgz#9d3c0fba0440d211d843cec3dedfa48055005825" + integrity sha512-rhjH9AG1fvabIDoGRVH587413LPjTZgmDF9fOFCbFJQV4yuocX1mHxxvXI4g3cGwbVY9wAYIoKlg1N79frJKQw== + +cli-cursor@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/cli-cursor/-/cli-cursor-2.1.0.tgz#b35dac376479facc3e94747d41d0d0f5238ffcb5" + integrity sha1-s12sN2R5+sw+lHR9QdDQ9SOP/LU= + dependencies: + restore-cursor "^2.0.0" + +cli-cursor@^3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/cli-cursor/-/cli-cursor-3.1.0.tgz#264305a7ae490d1d03bf0c9ba7c925d1753af307" + integrity sha512-I/zHAwsKf9FqGoXM4WWRACob9+SNukZTd94DWF57E4toouRulbCxcUh6RKUEOQlYTHJnzkPMySvPNaaSLNfLZw== + dependencies: + restore-cursor "^3.1.0" + +cli-spinners@^2.0.0: + version "2.5.0" + resolved "https://registry.yarnpkg.com/cli-spinners/-/cli-spinners-2.5.0.tgz#12763e47251bf951cb75c201dfa58ff1bcb2d047" + integrity sha512-PC+AmIuK04E6aeSs/pUccSujsTzBhu4HzC2dL+CfJB/Jcc2qTRbEwZQDfIUpt2Xl8BodYBEq8w4fc0kU2I9DjQ== + +cli-spinners@^2.2.0: + version "2.7.0" + resolved "https://registry.yarnpkg.com/cli-spinners/-/cli-spinners-2.7.0.tgz#f815fd30b5f9eaac02db604c7a231ed7cb2f797a" + integrity sha512-qu3pN8Y3qHNgE2AFweciB1IfMnmZ/fsNTEE+NOFjmGB2F/7rLhnhzppvpCnN4FovtP26k8lHyy9ptEbNwWFLzw== + +cli-table3@~0.5.0: + version "0.5.1" + resolved "https://registry.yarnpkg.com/cli-table3/-/cli-table3-0.5.1.tgz#0252372d94dfc40dbd8df06005f48f31f656f202" + integrity sha512-7Qg2Jrep1S/+Q3EceiZtQcDPWxhAvBw+ERf1162v4sikJrvojMHFqXt8QIVha8UlH9rgU0BeWPytZ9/TzYqlUw== + dependencies: + object-assign "^4.1.0" + string-width "^2.1.1" + optionalDependencies: + colors "^1.1.2" + +cliui@^3.2.0: + version "3.2.0" + resolved "https://registry.yarnpkg.com/cliui/-/cliui-3.2.0.tgz#120601537a916d29940f934da3b48d585a39213d" + integrity sha1-EgYBU3qRbSmUD5NNo7SNWFo5IT0= + dependencies: + string-width "^1.0.1" + strip-ansi "^3.0.1" + wrap-ansi "^2.0.0" + +cliui@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/cliui/-/cliui-5.0.0.tgz#deefcfdb2e800784aa34f46fa08e06851c7bbbc5" + integrity sha512-PYeGSEmmHM6zvoef2w8TPzlrnNpXIjTipYK780YswmIP9vjxmd6Y2a3CB2Ks6/AU8NHjZugXvo8w3oWM2qnwXA== + dependencies: + string-width "^3.1.0" + strip-ansi "^5.2.0" + wrap-ansi "^5.1.0" + +cliui@^6.0.0: + version "6.0.0" + resolved "https://registry.yarnpkg.com/cliui/-/cliui-6.0.0.tgz#511d702c0c4e41ca156d7d0e96021f23e13225b1" + integrity sha512-t6wbgtoCXvAzst7QgXxJYqPt0usEfbgQdftEPbLL/cvv6HPE5VgvqCuAIDR0NgU52ds6rFwqrgakNLrHEjCbrQ== + dependencies: + string-width "^4.2.0" + strip-ansi "^6.0.0" + wrap-ansi "^6.2.0" + +clone-buffer@1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/clone-buffer/-/clone-buffer-1.0.0.tgz#e3e25b207ac4e701af721e2cb5a16792cac3dc58" + integrity sha1-4+JbIHrE5wGvch4staFnksrD3Fg= + +clone-response@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/clone-response/-/clone-response-1.0.2.tgz#d1dc973920314df67fbeb94223b4ee350239e96b" + integrity sha1-0dyXOSAxTfZ/vrlCI7TuNQI56Ws= + dependencies: + 
mimic-response "^1.0.0" + +clone-stats@^0.0.1: + version "0.0.1" + resolved "https://registry.yarnpkg.com/clone-stats/-/clone-stats-0.0.1.tgz#b88f94a82cf38b8791d58046ea4029ad88ca99d1" + integrity sha1-uI+UqCzzi4eR1YBG6kAprYjKmdE= + +clone@2.1.1: + version "2.1.1" + resolved "https://registry.yarnpkg.com/clone/-/clone-2.1.1.tgz#d217d1e961118e3ac9a4b8bba3285553bf647cdb" + integrity sha1-0hfR6WERjjrJpLi7oyhVU79kfNs= + +clone@^1.0.0, clone@^1.0.2: + version "1.0.4" + resolved "https://registry.yarnpkg.com/clone/-/clone-1.0.4.tgz#da309cc263df15994c688ca902179ca3c7cd7c7e" + integrity sha1-2jCcwmPfFZlMaIypAheco8fNfH4= + +clone@^2.0.0, clone@^2.1.1: + version "2.1.2" + resolved "https://registry.yarnpkg.com/clone/-/clone-2.1.2.tgz#1b7f4b9f591f1e8f83670401600345a02887435f" + integrity sha1-G39Ln1kfHo+DZwQBYANFoCiHQ18= + +code-point-at@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/code-point-at/-/code-point-at-1.1.0.tgz#0d070b4d043a5bea33a2f1a40e2edb3d9a4ccf77" + integrity sha1-DQcLTQQ6W+ozovGkDi7bPZpMz3c= + +color-convert@^1.9.0: + version "1.9.3" + resolved "https://registry.yarnpkg.com/color-convert/-/color-convert-1.9.3.tgz#bb71850690e1f136567de629d2d5471deda4c1e8" + integrity sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg== + dependencies: + color-name "1.1.3" + +color-convert@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/color-convert/-/color-convert-2.0.1.tgz#72d3a68d598c9bdb3af2ad1e84f21d896abd4de3" + integrity sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ== + dependencies: + color-name "~1.1.4" + +color-logger@0.0.3: + version "0.0.3" + resolved "https://registry.yarnpkg.com/color-logger/-/color-logger-0.0.3.tgz#d9b22dd1d973e166b18bf313f9f481bba4df2018" + integrity sha1-2bIt0dlz4Waxi/MT+fSBu6TfIBg= + +color-logger@0.0.6: + version "0.0.6" + resolved "https://registry.yarnpkg.com/color-logger/-/color-logger-0.0.6.tgz#e56245ef29822657110c7cb75a9cd786cb69ed1b" + integrity sha1-5WJF7ymCJlcRDHy3WpzXhstp7Rs= + +color-name@1.1.3: + version "1.1.3" + resolved "https://registry.yarnpkg.com/color-name/-/color-name-1.1.3.tgz#a7d0558bd89c42f795dd42328f740831ca53bc25" + integrity sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw== + +color-name@~1.1.4: + version "1.1.4" + resolved "https://registry.yarnpkg.com/color-name/-/color-name-1.1.4.tgz#c2a09a87acbde69543de6f63fa3995c826c536a2" + integrity sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA== + +colorette@^1.2.1: + version "1.2.2" + resolved "https://registry.yarnpkg.com/colorette/-/colorette-1.2.2.tgz#cbcc79d5e99caea2dbf10eb3a26fd8b3e6acfa94" + integrity sha512-MKGMzyfeuutC/ZJ1cba9NqcNpfeqMUcYmyF1ZFY6/Cn7CNSAKx6a+s48sqLqyAiZuaP2TcqMhoo+dlwFnVxT9w== + +colors@1.3.3: + version "1.3.3" + resolved "https://registry.yarnpkg.com/colors/-/colors-1.3.3.tgz#39e005d546afe01e01f9c4ca8fa50f686a01205d" + integrity sha512-mmGt/1pZqYRjMxB1axhTo16/snVZ5krrKkcmMeVKxzECMMXoCgnvTPp10QgHfcbQZw8Dq2jMNG6je4JlWU0gWg== + +colors@^1.1.2, colors@^1.3.3: + version "1.4.0" + resolved "https://registry.yarnpkg.com/colors/-/colors-1.4.0.tgz#c50491479d4c1bdaed2c9ced32cf7c7dc2360f78" + integrity sha512-a+UqTh4kgZg/SlGvfbzDHpgRu7AAQOmmqRHJnxhRZICKFUT91brVhNNt58CMWU9PsBbv3PDCZUHbVxuDiH2mtA== + +combined-stream@^1.0.6, combined-stream@^1.0.8, combined-stream@~1.0.6: + version "1.0.8" + resolved 
"https://registry.yarnpkg.com/combined-stream/-/combined-stream-1.0.8.tgz#c3d45a8b34fd730631a110a8a2520682b31d5a7f" + integrity sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg== + dependencies: + delayed-stream "~1.0.0" + +command-exists@^1.2.8: + version "1.2.9" + resolved "https://registry.yarnpkg.com/command-exists/-/command-exists-1.2.9.tgz#c50725af3808c8ab0260fd60b01fbfa25b954f69" + integrity sha512-LTQ/SGc+s0Xc0Fu5WaKnR0YiygZkm9eKFvyS+fRsU7/ZWFF8ykFM6Pc9aCVf1+xasOOZpO3BAVgVrKvsqKHV7w== + +commander@3.0.2: + version "3.0.2" + resolved "https://registry.yarnpkg.com/commander/-/commander-3.0.2.tgz#6837c3fb677ad9933d1cfba42dd14d5117d6b39e" + integrity sha512-Gar0ASD4BDyKC4hl4DwHqDrmvjoxWKZigVnAbn5H1owvm4CxCPdb0HQDehwNYMJpla5+M2tPmPARzhtYuwpHow== + +commander@^2.15.0, commander@^2.20.3: + version "2.20.3" + resolved "https://registry.yarnpkg.com/commander/-/commander-2.20.3.tgz#fd485e84c03eb4881c20722ba48035e8531aeb33" + integrity sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ== + +component-emitter@1.2.1: + version "1.2.1" + resolved "https://registry.yarnpkg.com/component-emitter/-/component-emitter-1.2.1.tgz#137918d6d78283f7df7a6b7c5a63e140e69425e6" + integrity sha1-E3kY1teCg/ffemt8WmPhQOaUJeY= + +concat-map@0.0.1: + version "0.0.1" + resolved "https://registry.yarnpkg.com/concat-map/-/concat-map-0.0.1.tgz#d8a96bd77fd68df7793a73036a3ba0d5405d477b" + integrity sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg== + +concat-stream@1.5.1: + version "1.5.1" + resolved "https://registry.yarnpkg.com/concat-stream/-/concat-stream-1.5.1.tgz#f3b80acf9e1f48e3875c0688b41b6c31602eea1c" + integrity sha1-87gKz54fSOOHXAaItBtsMWAu6hw= + dependencies: + inherits "~2.0.1" + readable-stream "~2.0.0" + typedarray "~0.0.5" + +concat-stream@^1.6.0, concat-stream@^1.6.2, concat-stream@~1.6.2: + version "1.6.2" + resolved "https://registry.yarnpkg.com/concat-stream/-/concat-stream-1.6.2.tgz#904bdf194cd3122fc675c77fc4ac3d4ff0fd1a34" + integrity sha512-27HBghJxjiZtIk3Ycvn/4kbJk/1uZuJFfuPEns6LaEvpvG1f0hTea8lilrouyo9mVc2GWdcEZ8OLoGmSADlrCw== + dependencies: + buffer-from "^1.0.0" + inherits "^2.0.3" + readable-stream "^2.2.2" + typedarray "^0.0.6" + +"concat-stream@github:hugomrdias/concat-stream#feat/smaller": + version "2.0.0" + resolved "https://codeload.github.com/hugomrdias/concat-stream/tar.gz/057bc7b5d6d8df26c8cf00a3f151b6721a0a8034" + dependencies: + inherits "^2.0.3" + readable-stream "^3.0.2" + +configstore@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/configstore/-/configstore-4.0.0.tgz#5933311e95d3687efb592c528b922d9262d227e7" + integrity sha512-CmquAXFBocrzaSM8mtGPMM/HiWmyIpr4CcJl/rgY2uCObZ/S7cKU0silxslqJejl+t/T9HS8E0PUNQD81JGUEQ== + dependencies: + dot-prop "^4.1.0" + graceful-fs "^4.1.2" + make-dir "^1.0.0" + unique-string "^1.0.0" + write-file-atomic "^2.0.0" + xdg-basedir "^3.0.0" + +console-control-strings@^1.0.0, console-control-strings@~1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/console-control-strings/-/console-control-strings-1.1.0.tgz#3d7cf4464db6446ea644bf4b39507f9851008e8e" + integrity sha1-PXz0Rk22RG6mRL9LOVB/mFEAjo4= + +constant-case@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/constant-case/-/constant-case-2.0.0.tgz#4175764d389d3fa9c8ecd29186ed6005243b6a46" + integrity sha1-QXV2TTidP6nI7NKRhu1gBSQ7akY= + dependencies: + snake-case "^2.1.0" + upper-case "^1.1.1" + 
+content-disposition@0.5.4: + version "0.5.4" + resolved "https://registry.yarnpkg.com/content-disposition/-/content-disposition-0.5.4.tgz#8b82b4efac82512a02bb0b1dcec9d2c5e8eb5bfe" + integrity sha512-FveZTNuGw04cxlAiWbzi6zTAL/lhehaWbTtgluJh4/E95DqMwTmha3KZN1aAWA8cFIhHzMZUvLevkw5Rqk+tSQ== + dependencies: + safe-buffer "5.2.1" + +content-hash@^2.5.2: + version "2.5.2" + resolved "https://registry.yarnpkg.com/content-hash/-/content-hash-2.5.2.tgz#bbc2655e7c21f14fd3bfc7b7d4bfe6e454c9e211" + integrity sha512-FvIQKy0S1JaWV10sMsA7TRx8bpU+pqPkhbsfvOJAdjRXvYxEckAwQWGwtRjiaJfh+E0DvcWUGqcdjwMGFjsSdw== + dependencies: + cids "^0.7.1" + multicodec "^0.5.5" + multihashes "^0.4.15" + +content-type@~1.0.4: + version "1.0.4" + resolved "https://registry.yarnpkg.com/content-type/-/content-type-1.0.4.tgz#e138cc75e040c727b1966fe5e5f8c9aee256fe3b" + integrity sha512-hIP3EEPs8tB9AT1L+NUqtwOAps4mk2Zob89MWXMHjHWg9milF/j4osnnQLXBCBFBk/tvIG/tUc9mOUJiPBhPXA== + +convert-source-map@1.X, convert-source-map@^1.5.1, convert-source-map@^1.7.0: + version "1.7.0" + resolved "https://registry.yarnpkg.com/convert-source-map/-/convert-source-map-1.7.0.tgz#17a2cb882d7f77d3490585e2ce6c524424a3a442" + integrity sha512-4FJkXzKXEDB1snCFZlLP4gpC3JILicCpGbzG9f9G7tGqGCzETQ2hWPrcinA9oU4wtf2biUaEH5065UnMeR33oA== + dependencies: + safe-buffer "~5.1.1" + +cookie-signature@1.0.6: + version "1.0.6" + resolved "https://registry.yarnpkg.com/cookie-signature/-/cookie-signature-1.0.6.tgz#e303a882b342cc3ee8ca513a79999734dab3ae2c" + integrity sha512-QADzlaHc8icV8I7vbaJXJwod9HWYp8uCqf1xa4OfNu1T7JVxQIrUgOWtHdNDtPiywmFbiS12VjotIXLrKM3orQ== + +cookie@0.5.0: + version "0.5.0" + resolved "https://registry.yarnpkg.com/cookie/-/cookie-0.5.0.tgz#d1f5d71adec6558c58f389987c366aa47e994f8b" + integrity sha512-YZ3GUyn/o8gfKJlnlX7g7xq4gyO6OSuhGPKaaGssGB2qgDUS0gPgtTvoyZLTt9Ab6dC4hfc9dV5arkvc/OCmrw== + +cookiejar@^2.1.1: + version "2.1.2" + resolved "https://registry.yarnpkg.com/cookiejar/-/cookiejar-2.1.2.tgz#dd8a235530752f988f9a0844f3fc589e3111125c" + integrity sha512-Mw+adcfzPxcPeI+0WlvRrr/3lGVO0bD75SxX6811cxSh1Wbxx7xZBGK1eVtDf6si8rg2lhnUjsVLMFMfbRIuwA== + +core-js-compat@^3.8.1: + version "3.9.1" + resolved "https://registry.yarnpkg.com/core-js-compat/-/core-js-compat-3.9.1.tgz#4e572acfe90aff69d76d8c37759d21a5c59bb455" + integrity sha512-jXAirMQxrkbiiLsCx9bQPJFA6llDadKMpYrBJQJ3/c4/vsPP/fAf29h24tviRlvwUL6AmY5CHLu2GvjuYviQqA== + dependencies: + browserslist "^4.16.3" + semver "7.0.0" + +core-js-pure@^3.10.2: + version "3.19.1" + resolved "https://registry.yarnpkg.com/core-js-pure/-/core-js-pure-3.19.1.tgz#edffc1fc7634000a55ba05e95b3f0fe9587a5aa4" + integrity sha512-Q0Knr8Es84vtv62ei6/6jXH/7izKmOrtrxH9WJTHLCMAVeU+8TF8z8Nr08CsH4Ot0oJKzBzJJL9SJBYIv7WlfQ== + +core-js@^2.4.0, core-js@^2.5.0: + version "2.6.12" + resolved "https://registry.yarnpkg.com/core-js/-/core-js-2.6.12.tgz#d9333dfa7b065e347cc5682219d6f690859cc2ec" + integrity sha512-Kb2wC0fvsWfQrgk8HU5lW6U/Lcs8+9aaYcy4ZFc6DDlo4nZ7n70dEgE5rtR0oG6ufKDUnrwfWL1mXR5ljDatrQ== + +core-util-is@1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/core-util-is/-/core-util-is-1.0.2.tgz#b5fd54220aa2bc5ab57aab7140c940754503c1a7" + integrity sha512-3lqz5YjWTYnW6dlDa5TLaTCcShfar1e40rmcJVwCBJC6mWlFuj0eCHIElmG1g5kyuJ/GD+8Wn4FFCcz4gJPfaQ== + +core-util-is@~1.0.0: + version "1.0.3" + resolved "https://registry.yarnpkg.com/core-util-is/-/core-util-is-1.0.3.tgz#a6042d3634c2b27e9328f837b965fac83808db85" + integrity sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ== + 
+cors@^2.8.1, cors@^2.8.5: + version "2.8.5" + resolved "https://registry.yarnpkg.com/cors/-/cors-2.8.5.tgz#eac11da51592dd86b9f06f6e7ac293b3df875d29" + integrity sha512-KIHbLJqu73RGr/hnbrO9uBeixNGuvSQjul/jdFvS/KFSIH1hWVd1ng7zOHx+YrEfInLG7q4n6GHQ9cDtxv/P6g== + dependencies: + object-assign "^4" + vary "^1" + +cosmiconfig@6.0.0: + version "6.0.0" + resolved "https://registry.yarnpkg.com/cosmiconfig/-/cosmiconfig-6.0.0.tgz#da4fee853c52f6b1e6935f41c1a2fc50bd4a9982" + integrity sha512-xb3ZL6+L8b9JLLCx3ZdoZy4+2ECphCMo2PwqgP1tlfVq6M6YReyzBJtvWWtbDSpNr9hn96pkCiZqUcFEc+54Qg== + dependencies: + "@types/parse-json" "^4.0.0" + import-fresh "^3.1.0" + parse-json "^5.0.0" + path-type "^4.0.0" + yaml "^1.7.2" + +create-ecdh@^4.0.0: + version "4.0.4" + resolved "https://registry.yarnpkg.com/create-ecdh/-/create-ecdh-4.0.4.tgz#d6e7f4bffa66736085a0762fd3a632684dabcc4e" + integrity sha512-mf+TCx8wWc9VpuxfP2ht0iSISLZnt0JgWlrOKZiNqyUZWnjIaCIVNQArMHnCZKfEYRg6IM7A+NeJoN8gf/Ws0A== + dependencies: + bn.js "^4.1.0" + elliptic "^6.5.3" + +create-hash@^1.1.0, create-hash@^1.1.2, create-hash@^1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/create-hash/-/create-hash-1.2.0.tgz#889078af11a63756bcfb59bd221996be3a9ef196" + integrity sha512-z00bCGNHDG8mHAkP7CtT1qVu+bFQUPjYq/4Iv3C3kWjTFV10zIjfSoeqXo9Asws8gwSHDGj/hl2u4OGIjapeCg== + dependencies: + cipher-base "^1.0.1" + inherits "^2.0.1" + md5.js "^1.3.4" + ripemd160 "^2.0.1" + sha.js "^2.4.0" + +create-hmac@^1.1.0, create-hmac@^1.1.4, create-hmac@^1.1.7: + version "1.1.7" + resolved "https://registry.yarnpkg.com/create-hmac/-/create-hmac-1.1.7.tgz#69170c78b3ab957147b2b8b04572e47ead2243ff" + integrity sha512-MJG9liiZ+ogc4TzUwuvbER1JRdgvUFSB5+VR/g5h82fGaIRWMWddtKBHi7/sVhfjQZ6SehlyhvQYrcYkaUIpLg== + dependencies: + cipher-base "^1.0.3" + create-hash "^1.1.0" + inherits "^2.0.1" + ripemd160 "^2.0.0" + safe-buffer "^5.0.1" + sha.js "^2.4.8" + +cross-fetch@3.0.6, cross-fetch@^3.0.4: + version "3.0.6" + resolved "https://registry.yarnpkg.com/cross-fetch/-/cross-fetch-3.0.6.tgz#3a4040bc8941e653e0e9cf17f29ebcd177d3365c" + integrity sha512-KBPUbqgFjzWlVcURG+Svp9TlhA5uliYtiNx/0r8nv0pdypeQCRJ9IaSIc3q/x3q8t3F75cHuwxVql1HFGHCNJQ== + dependencies: + node-fetch "2.6.1" + +cross-fetch@^1.0.0: + version "1.1.1" + resolved "https://registry.yarnpkg.com/cross-fetch/-/cross-fetch-1.1.1.tgz#dede6865ae30f37eae62ac90ebb7bdac002b05a0" + integrity sha512-+VJE04+UfxxmBfcnmAu/lKor53RUCx/1ilOti4p+JgrnLQ4AZZIRoe2OEd76VaHyWQmQxqKnV+TaqjHC4r0HWw== + dependencies: + node-fetch "1.7.3" + whatwg-fetch "2.0.3" + +cross-fetch@^2.1.0, cross-fetch@^2.1.1: + version "2.2.3" + resolved "https://registry.yarnpkg.com/cross-fetch/-/cross-fetch-2.2.3.tgz#e8a0b3c54598136e037f8650f8e823ccdfac198e" + integrity sha512-PrWWNH3yL2NYIb/7WF/5vFG3DCQiXDOVf8k3ijatbrtnwNuhMWLC7YF7uqf53tbTFDzHIUD8oITw4Bxt8ST3Nw== + dependencies: + node-fetch "2.1.2" + whatwg-fetch "2.0.4" + +cross-spawn@^7.0.0: + version "7.0.3" + resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-7.0.3.tgz#f73a85b9d5d41d045551c177e2882d4ac85728a6" + integrity sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w== + dependencies: + path-key "^3.1.0" + shebang-command "^2.0.0" + which "^2.0.1" + +crypto-browserify@3.12.0: + version "3.12.0" + resolved "https://registry.yarnpkg.com/crypto-browserify/-/crypto-browserify-3.12.0.tgz#396cf9f3137f03e4b8e532c58f698254e00f80ec" + integrity sha512-fz4spIh+znjO2VjL+IdhEpRJ3YN6sMzITSBijk6FK2UvTqruSQW+/cCZTSNsMiZNvUeq0CqurF+dAbyiGOY6Wg== + dependencies: + 
browserify-cipher "^1.0.0" + browserify-sign "^4.0.0" + create-ecdh "^4.0.0" + create-hash "^1.1.0" + create-hmac "^1.1.0" + diffie-hellman "^5.0.0" + inherits "^2.0.1" + pbkdf2 "^3.0.3" + public-encrypt "^4.0.0" + randombytes "^2.0.0" + randomfill "^1.0.3" + +crypto-js@^3.1.9-1: + version "3.3.0" + resolved "https://registry.yarnpkg.com/crypto-js/-/crypto-js-3.3.0.tgz#846dd1cce2f68aacfa156c8578f926a609b7976b" + integrity sha512-DIT51nX0dCfKltpRiXV+/TVZq+Qq2NgF4644+K7Ttnla7zEzqc+kjJyiB96BHNyUTBxyjzRcZYpUdZa+QAqi6Q== + +crypto-random-string@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/crypto-random-string/-/crypto-random-string-1.0.0.tgz#a230f64f568310e1498009940790ec99545bca7e" + integrity sha1-ojD2T1aDEOFJgAmUB5DsmVRbyn4= + +css-select@^3.1.2: + version "3.1.2" + resolved "https://registry.yarnpkg.com/css-select/-/css-select-3.1.2.tgz#d52cbdc6fee379fba97fb0d3925abbd18af2d9d8" + integrity sha512-qmss1EihSuBNWNNhHjxzxSfJoFBM/lERB/Q4EnsJQQC62R2evJDW481091oAdOr9uh46/0n4nrg0It5cAnj1RA== + dependencies: + boolbase "^1.0.0" + css-what "^4.0.0" + domhandler "^4.0.0" + domutils "^2.4.3" + nth-check "^2.0.0" + +css-select@~1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/css-select/-/css-select-1.2.0.tgz#2b3a110539c5355f1cd8d314623e870b121ec858" + integrity sha1-KzoRBTnFNV8c2NMUYj6HCxIeyFg= + dependencies: + boolbase "~1.0.0" + css-what "2.1" + domutils "1.5.1" + nth-check "~1.0.1" + +css-what@2.1: + version "2.1.3" + resolved "https://registry.yarnpkg.com/css-what/-/css-what-2.1.3.tgz#a6d7604573365fe74686c3f311c56513d88285f2" + integrity sha512-a+EPoD+uZiNfh+5fxw2nO9QwFa6nJe2Or35fGY6Ipw1R3R4AGz1d1TEZrCegvw2YTmZ0jXirGYlzxxpYSHwpEg== + +css-what@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/css-what/-/css-what-4.0.0.tgz#35e73761cab2eeb3d3661126b23d7aa0e8432233" + integrity sha512-teijzG7kwYfNVsUh2H/YN62xW3KK9YhXEgSlbxMlcyjPNvdKJqFx5lrwlJgoFP1ZHlB89iGDlo/JyshKeRhv5A== + +css@2.X: + version "2.2.4" + resolved "https://registry.yarnpkg.com/css/-/css-2.2.4.tgz#c646755c73971f2bba6a601e2cf2fd71b1298929" + integrity sha512-oUnjmWpy0niI3x/mPL8dVEI1l7MnG3+HHyRPHf+YFSbK+svOhXpmSOcDURUh2aOCgl2grzrOPt1nHLuCVFULLw== + dependencies: + inherits "^2.0.3" + source-map "^0.6.1" + source-map-resolve "^0.5.2" + urix "^0.1.0" + +cssfilter@0.0.10: + version "0.0.10" + resolved "https://registry.yarnpkg.com/cssfilter/-/cssfilter-0.0.10.tgz#c6d2672632a2e5c83e013e6864a42ce8defd20ae" + integrity sha1-xtJnJjKi5cg+AT5oZKQs6N79IK4= + +cssom@0.3.x, "cssom@>= 0.3.0 < 0.4.0": + version "0.3.8" + resolved "https://registry.yarnpkg.com/cssom/-/cssom-0.3.8.tgz#9f1276f5b2b463f2114d3f2c75250af8c1a36f4a" + integrity sha512-b0tGHbfegbhPJpxpiBPU2sCkigAqtM9O121le6bbOlgyV+NyGyCmVfJ6QW9eRjz8CpNfWEOYBIMIGRYkLwsIYg== + +"cssstyle@>= 0.2.29 < 0.3.0": + version "0.2.37" + resolved "https://registry.yarnpkg.com/cssstyle/-/cssstyle-0.2.37.tgz#541097234cb2513c83ceed3acddc27ff27987d54" + integrity sha1-VBCXI0yyUTyDzu06zdwn/yeYfVQ= + dependencies: + cssom "0.3.x" + +d@1, d@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/d/-/d-1.0.1.tgz#8698095372d58dbee346ffd0c7093f99f8f9eb5a" + integrity sha512-m62ShEObQ39CfralilEQRjH6oAMtNCV1xJyEx5LpRYUVN+EviphDgUc/F3hnYbADmkiNs67Y+3ylmlG7Lnu+FA== + dependencies: + es5-ext "^0.10.50" + type "^1.0.1" + +dashdash@^1.12.0: + version "1.14.1" + resolved "https://registry.yarnpkg.com/dashdash/-/dashdash-1.14.1.tgz#853cfa0f7cbe2fed5de20326b8dd581035f6e2f0" + integrity 
sha512-jRFi8UDGo6j+odZiEpjazZaWqEal3w/basFjQHQEwVtZJGDpxbH1MeYluwCS8Xq5wmLJooDlMgvVarmWfGM44g== + dependencies: + assert-plus "^1.0.0" + +dataloader@2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/dataloader/-/dataloader-2.0.0.tgz#41eaf123db115987e21ca93c005cd7753c55fe6f" + integrity sha512-YzhyDAwA4TaQIhM5go+vCLmU0UikghC/t9DTQYZR2M/UvZ1MdOhPezSDZcjj9uqQJOMqjLcpWtyW2iNINdlatQ== + +debug-fabulous@0.0.X: + version "0.0.4" + resolved "https://registry.yarnpkg.com/debug-fabulous/-/debug-fabulous-0.0.4.tgz#fa071c5d87484685424807421ca4b16b0b1a0763" + integrity sha1-+gccXYdIRoVCSAdCHKSxawsaB2M= + dependencies: + debug "2.X" + lazy-debug-legacy "0.0.X" + object-assign "4.1.0" + +debug@2.6.9, debug@2.X, debug@^2.2.0, debug@^2.6.8, debug@^2.6.9: + version "2.6.9" + resolved "https://registry.yarnpkg.com/debug/-/debug-2.6.9.tgz#5d128515df134ff327e90a4c93f4e077a536341f" + integrity sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA== + dependencies: + ms "2.0.0" + +debug@3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/debug/-/debug-3.1.0.tgz#5bb5a0672628b64149566ba16819e61518c67261" + integrity sha512-OX8XqP7/1a9cqkxYw2yXss15f26NKWBpDXQd0/uK/KPqdQhxbPa994hnzjcE2VqQpDslf55723cKPUOGSmMY3g== + dependencies: + ms "2.0.0" + +debug@4.1.1: + version "4.1.1" + resolved "https://registry.yarnpkg.com/debug/-/debug-4.1.1.tgz#3b72260255109c6b589cee050f1d516139664791" + integrity sha512-pYAIzeRo8J6KPEaJ0VWOh5Pzkbw/RetuzehGM7QRRX5he4fPHx2rdKMB256ehJCkX+XRQm16eZLqLNS8RSZXZw== + dependencies: + ms "^2.1.1" + +debug@4.3.1: + version "4.3.1" + resolved "https://registry.yarnpkg.com/debug/-/debug-4.3.1.tgz#f0d229c505e0c6d8c49ac553d1b13dc183f6b2ee" + integrity sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ== + dependencies: + ms "2.1.2" + +debug@^3.1.0, debug@^3.2.6: + version "3.2.7" + resolved "https://registry.yarnpkg.com/debug/-/debug-3.2.7.tgz#72580b7e9145fb39b6676f9c5e5fb100b934179a" + integrity sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ== + dependencies: + ms "^2.1.1" + +debug@^4.1.0, debug@^4.1.1, debug@^4.3.1: + version "4.3.4" + resolved "https://registry.yarnpkg.com/debug/-/debug-4.3.4.tgz#1319f6579357f2338d3337d2cdd4914bb5dcc865" + integrity sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ== + dependencies: + ms "2.1.2" + +decamelize@^1.1.1, decamelize@^1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/decamelize/-/decamelize-1.2.0.tgz#f6534d15148269b20352e7bee26f501f9a191290" + integrity sha1-9lNNFRSCabIDUue+4m9QH5oZEpA= + +decode-uri-component@^0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/decode-uri-component/-/decode-uri-component-0.2.0.tgz#eb3913333458775cb84cd1a1fae062106bb87545" + integrity sha1-6zkTMzRYd1y4TNGh+uBiEGu4dUU= + +decompress-response@^3.2.0, decompress-response@^3.3.0: + version "3.3.0" + resolved "https://registry.yarnpkg.com/decompress-response/-/decompress-response-3.3.0.tgz#80a4dd323748384bfa248083622aedec982adff3" + integrity sha1-gKTdMjdIOEv6JICDYirt7Jgq3/M= + dependencies: + mimic-response "^1.0.0" + +deep-extend@^0.6.0: + version "0.6.0" + resolved "https://registry.yarnpkg.com/deep-extend/-/deep-extend-0.6.0.tgz#c4fa7c95404a17a9c3e8ca7e1537312b736330ac" + integrity sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA== + +deep-is@~0.1.3: + version "0.1.3" + resolved 
"https://registry.yarnpkg.com/deep-is/-/deep-is-0.1.3.tgz#b369d6fb5dbc13eecf524f91b070feedc357cf34" + integrity sha1-s2nW+128E+7PUk+RsHD+7cNXzzQ= + +defaults@^1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/defaults/-/defaults-1.0.3.tgz#c656051e9817d9ff08ed881477f3fe4019f3ef7d" + integrity sha512-s82itHOnYrN0Ib8r+z7laQz3sdE+4FP3d9Q7VLO7U+KRT+CR0GsWuyHxzdAY82I7cXv0G/twrqomTJLOssO5HA== + dependencies: + clone "^1.0.2" + +defer-to-connect@^1.0.1: + version "1.1.3" + resolved "https://registry.yarnpkg.com/defer-to-connect/-/defer-to-connect-1.1.3.tgz#331ae050c08dcf789f8c83a7b81f0ed94f4ac591" + integrity sha512-0ISdNousHvZT2EiFlZeZAHBUvSxmKswVCEf8hW7KWgG4a8MVEu/3Vb6uWYozkjylyCxe0JBIiRB1jV45S70WVQ== + +deferred-leveldown@~1.2.1: + version "1.2.2" + resolved "https://registry.yarnpkg.com/deferred-leveldown/-/deferred-leveldown-1.2.2.tgz#3acd2e0b75d1669924bc0a4b642851131173e1eb" + integrity sha512-uukrWD2bguRtXilKt6cAWKyoXrTSMo5m7crUdLfWQmu8kIm88w3QZoUL+6nhpfKVmhHANER6Re3sKoNoZ3IKMA== + dependencies: + abstract-leveldown "~2.6.0" + +deferred-leveldown@~5.0.0: + version "5.0.1" + resolved "https://registry.yarnpkg.com/deferred-leveldown/-/deferred-leveldown-5.0.1.tgz#1642eb18b535dfb2b6ac4d39fb10a9cbcfd13b09" + integrity sha512-BXohsvTedWOLkj2n/TY+yqVlrCWa2Zs8LSxh3uCAgFOru7/pjxKyZAexGa1j83BaKloER4PqUyQ9rGPJLt9bqA== + dependencies: + abstract-leveldown "~6.0.0" + inherits "^2.0.3" + +deferred-leveldown@~5.3.0: + version "5.3.0" + resolved "https://registry.yarnpkg.com/deferred-leveldown/-/deferred-leveldown-5.3.0.tgz#27a997ad95408b61161aa69bd489b86c71b78058" + integrity sha512-a59VOT+oDy7vtAbLRCZwWgxu2BaCfd5Hk7wxJd48ei7I+nsg8Orlb9CLG0PMZienk9BSUKgeAqkO2+Lw+1+Ukw== + dependencies: + abstract-leveldown "~6.2.1" + inherits "^2.0.3" + +define-properties@^1.1.2, define-properties@^1.1.3: + version "1.1.3" + resolved "https://registry.yarnpkg.com/define-properties/-/define-properties-1.1.3.tgz#cf88da6cbee26fe6db7094f61d870cbd84cee9f1" + integrity sha512-3MqfYKj2lLzdMSf8ZIZE/V+Zuy+BgD6f164e8K2w7dgnpKArBDerGYpM46IYYcjnkdPNMjPk9A6VFB8+3SKlXQ== + dependencies: + object-keys "^1.0.12" + +delay@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/delay/-/delay-5.0.0.tgz#137045ef1b96e5071060dd5be60bf9334436bd1d" + integrity sha512-ReEBKkIfe4ya47wlPYf/gu5ib6yUG0/Aez0JQZQz94kiWtRQvZIQbTiehsnwHvLSWJnQdhVeqYue7Id1dKr0qw== + +delayed-stream@~1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/delayed-stream/-/delayed-stream-1.0.0.tgz#df3ae199acadfb7d440aaae0b29e2272b24ec619" + integrity sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ== + +delegates@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/delegates/-/delegates-1.0.0.tgz#84c6e159b81904fdca59a0ef44cd870d31250f9a" + integrity sha1-hMbhWbgZBP3KWaDvRM2HDTElD5o= + +delimit-stream@0.1.0: + version "0.1.0" + resolved "https://registry.yarnpkg.com/delimit-stream/-/delimit-stream-0.1.0.tgz#9b8319477c0e5f8aeb3ce357ae305fc25ea1cd2b" + integrity sha512-a02fiQ7poS5CnjiJBAsjGLPp5EwVoGHNeu9sziBd9huppRfsAFIpv5zNLv0V1gbop53ilngAf5Kf331AwcoRBQ== + +depd@2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/depd/-/depd-2.0.0.tgz#b696163cc757560d09cf22cc8fad1571b79e76df" + integrity sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw== + +depd@~1.1.2: + version "1.1.2" + resolved "https://registry.yarnpkg.com/depd/-/depd-1.1.2.tgz#9bcd52e14c097763e749b274c4346ed2e560b5a9" + integrity sha1-m81S4UwJd2PnSbJ0xDRu0uVgtak= + 
+deprecated-decorator@^0.1.6: + version "0.1.6" + resolved "https://registry.yarnpkg.com/deprecated-decorator/-/deprecated-decorator-0.1.6.tgz#00966317b7a12fe92f3cc831f7583af329b86c37" + integrity sha1-AJZjF7ehL+kvPMgx91g68ym4bDc= + +des.js@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/des.js/-/des.js-1.0.1.tgz#5382142e1bdc53f85d86d53e5f4aa7deb91e0843" + integrity sha512-Q0I4pfFrv2VPd34/vfLrFOoRmlYj3OV50i7fskps1jZWK1kApMWWT9G6RRUeYedLcBDIhnSDaUvJMb3AhUlaEA== + dependencies: + inherits "^2.0.1" + minimalistic-assert "^1.0.0" + +destroy@1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/destroy/-/destroy-1.2.0.tgz#4803735509ad8be552934c67df614f94e66fa015" + integrity sha512-2sJGJTaXIIaR1w4iJSNoN0hnMY7Gpc/n8D4qSCJw8QqFWXf7cuAgnEHxBpweaVcPevC2l3KpjYCx3NypQQgaJg== + +detect-indent@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/detect-indent/-/detect-indent-4.0.0.tgz#f76d064352cdf43a1cb6ce619c4ee3a9475de208" + integrity sha1-920GQ1LN9Docts5hnE7jqUdd4gg= + dependencies: + repeating "^2.0.0" + +detect-indent@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/detect-indent/-/detect-indent-5.0.0.tgz#3871cc0a6a002e8c3e5b3cf7f336264675f06b9d" + integrity sha1-OHHMCmoALow+Wzz38zYmRnXwa50= + +detect-libc@^1.0.2: + version "1.0.3" + resolved "https://registry.yarnpkg.com/detect-libc/-/detect-libc-1.0.3.tgz#fa137c4bd698edf55cd5cd02ac559f91a4c4ba9b" + integrity sha1-+hN8S9aY7fVc1c0CrFWfkaTEups= + +detect-newline@2.X: + version "2.1.0" + resolved "https://registry.yarnpkg.com/detect-newline/-/detect-newline-2.1.0.tgz#f41f1c10be4b00e87b5f13da680759f2c5bfd3e2" + integrity sha1-9B8cEL5LAOh7XxPaaAdZ8sW/0+I= + +detect-node@^2.0.4: + version "2.1.0" + resolved "https://registry.yarnpkg.com/detect-node/-/detect-node-2.1.0.tgz#c9c70775a49c3d03bc2c06d9a73be550f978f8b1" + integrity sha512-T0NIuQpnTvFDATNuHN5roPwSBG83rFsuO+MXXH9/3N1eFbn4wcPjttvjMLEPWJ0RGUYgQE7cGgS3tNxbqCGM7g== + +dicer@0.3.0: + version "0.3.0" + resolved "https://registry.yarnpkg.com/dicer/-/dicer-0.3.0.tgz#eacd98b3bfbf92e8ab5c2fdb71aaac44bb06b872" + integrity sha512-MdceRRWqltEG2dZqO769g27N/3PXfcKl04VhYnBlo2YhH7zPi88VebsjTKclaOyiuMaGU72hTfw3VkUitGcVCA== + dependencies: + streamsearch "0.1.2" + +diff@4.0.2: + version "4.0.2" + resolved "https://registry.yarnpkg.com/diff/-/diff-4.0.2.tgz#60f3aecb89d5fae520c11aa19efc2bb982aade7d" + integrity sha512-58lmxKSA4BNyLz+HHMUzlOEpg09FV+ev6ZMe3vJihgdxzgcwZ8VoEEPmALCZG9LmqfVoNMMKpttIYTVG6uDY7A== + +diffie-hellman@^5.0.0: + version "5.0.3" + resolved "https://registry.yarnpkg.com/diffie-hellman/-/diffie-hellman-5.0.3.tgz#40e8ee98f55a2149607146921c63e1ae5f3d2875" + integrity sha512-kqag/Nl+f3GwyK25fhUMYj81BUOrZ9IuJsjIcDE5icNM9FJHAVm3VcUDxdLPoQtTuUylWm6ZIknYJwwaPxsUzg== + dependencies: + bn.js "^4.1.0" + miller-rabin "^4.0.0" + randombytes "^2.0.0" + +dir-glob@^3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/dir-glob/-/dir-glob-3.0.1.tgz#56dbf73d992a4a93ba1584f4534063fd2e41717f" + integrity sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA== + dependencies: + path-type "^4.0.0" + +docker-compose@0.23.4: + version "0.23.4" + resolved "https://registry.yarnpkg.com/docker-compose/-/docker-compose-0.23.4.tgz#43bcabcde55a6ba2873b52fe0ccd99dd8fdceba8" + integrity sha512-yWdXby9uQ8o4syOfvoSJ9ZlTnLipvUmDn59uaYY5VGIUSUAfMPPGqE1DE3pOCnfSg9Tl9UOOFO0PCSAzuIHmuA== + +docker-modem@^1.0.8: + version "1.0.9" + resolved 
"https://registry.yarnpkg.com/docker-modem/-/docker-modem-1.0.9.tgz#a1f13e50e6afb6cf3431b2d5e7aac589db6aaba8" + integrity sha512-lVjqCSCIAUDZPAZIeyM125HXfNvOmYYInciphNrLrylUtKyW66meAjSPXWchKVzoIYZx69TPnAepVSSkeawoIw== + dependencies: + JSONStream "1.3.2" + debug "^3.2.6" + readable-stream "~1.0.26-4" + split-ca "^1.0.0" + +dockerode@2.5.8: + version "2.5.8" + resolved "https://registry.yarnpkg.com/dockerode/-/dockerode-2.5.8.tgz#1b661e36e1e4f860e25f56e0deabe9f87f1d0acc" + integrity sha512-+7iOUYBeDTScmOmQqpUYQaE7F4vvIt6+gIZNHWhqAQEI887tiPFB9OvXI/HzQYqfUNvukMK+9myLW63oTJPZpw== + dependencies: + concat-stream "~1.6.2" + docker-modem "^1.0.8" + tar-fs "~1.16.3" + +dom-serializer@0: + version "0.2.2" + resolved "https://registry.yarnpkg.com/dom-serializer/-/dom-serializer-0.2.2.tgz#1afb81f533717175d478655debc5e332d9f9bb51" + integrity sha512-2/xPb3ORsQ42nHYiSunXkDjPLBaEj/xTwUO4B7XCZQTRk7EBtTOPaygh10YAAh2OI1Qrp6NWfpAhzswj0ydt9g== + dependencies: + domelementtype "^2.0.1" + entities "^2.0.0" + +dom-serializer@^1.0.1, dom-serializer@~1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/dom-serializer/-/dom-serializer-1.2.0.tgz#3433d9136aeb3c627981daa385fc7f32d27c48f1" + integrity sha512-n6kZFH/KlCrqs/1GHMOd5i2fd/beQHuehKdWvNNffbGHTr/almdhuVvTVFb3V7fglz+nC50fFusu3lY33h12pA== + dependencies: + domelementtype "^2.0.1" + domhandler "^4.0.0" + entities "^2.0.0" + +dom-serializer@~0.1.0: + version "0.1.1" + resolved "https://registry.yarnpkg.com/dom-serializer/-/dom-serializer-0.1.1.tgz#1ec4059e284babed36eec2941d4a970a189ce7c0" + integrity sha512-l0IU0pPzLWSHBcieZbpOKgkIn3ts3vAh7ZuFyXNwJxJXk/c4Gwj9xaTJwIDVQCXawWD0qb3IzMGH5rglQaO0XA== + dependencies: + domelementtype "^1.3.0" + entities "^1.1.1" + +dom-walk@^0.1.0: + version "0.1.2" + resolved "https://registry.yarnpkg.com/dom-walk/-/dom-walk-0.1.2.tgz#0c548bef048f4d1f2a97249002236060daa3fd84" + integrity sha512-6QvTW9mrGeIegrFXdtQi9pk7O/nSK6lSdXW2eqUspN5LWD7UTji2Fqw5V2YLjBpHEoU9Xl/eUWNpDeZvoyOv2w== + +domelementtype@1, domelementtype@^1.3.0, domelementtype@^1.3.1: + version "1.3.1" + resolved "https://registry.yarnpkg.com/domelementtype/-/domelementtype-1.3.1.tgz#d048c44b37b0d10a7f2a3d5fee3f4333d790481f" + integrity sha512-BSKB+TSpMpFI/HOxCNr1O8aMOTZ8hT3pM3GQ0w/mWRmkhEDSFJkkyzz4XQsBV44BChwGkrDfMyjVD0eA2aFV3w== + +domelementtype@^2.0.1, domelementtype@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/domelementtype/-/domelementtype-2.1.0.tgz#a851c080a6d1c3d94344aed151d99f669edf585e" + integrity sha512-LsTgx/L5VpD+Q8lmsXSHW2WpA+eBlZ9HPf3erD1IoPF00/3JKHZ3BknUVA2QGDNu69ZNmyFmCWBSO45XjYKC5w== + +domhandler@2.3: + version "2.3.0" + resolved "https://registry.yarnpkg.com/domhandler/-/domhandler-2.3.0.tgz#2de59a0822d5027fabff6f032c2b25a2a8abe738" + integrity sha1-LeWaCCLVAn+r/28DLCsloqir5zg= + dependencies: + domelementtype "1" + +domhandler@^2.3.0: + version "2.4.2" + resolved "https://registry.yarnpkg.com/domhandler/-/domhandler-2.4.2.tgz#8805097e933d65e85546f726d60f5eb88b44f803" + integrity sha512-JiK04h0Ht5u/80fdLMCEmV4zkNh2BcoMFBmZ/91WtYZ8qVXSKjiw7fXMgFPnHcSZgOo3XdinHvmnDUeMf5R4wA== + dependencies: + domelementtype "1" + +domhandler@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/domhandler/-/domhandler-4.0.0.tgz#01ea7821de996d85f69029e81fa873c21833098e" + integrity sha512-KPTbnGQ1JeEMQyO1iYXoagsI6so/C96HZiFyByU3T6iAzpXn8EGEvct6unm1ZGoed8ByO2oirxgwxBmqKF9haA== + dependencies: + domelementtype "^2.1.0" + +domutils@1.5, domutils@1.5.1: + version "1.5.1" + resolved 
"https://registry.yarnpkg.com/domutils/-/domutils-1.5.1.tgz#dcd8488a26f563d61079e48c9f7b7e32373682cf" + integrity sha1-3NhIiib1Y9YQeeSMn3t+Mjc2gs8= + dependencies: + dom-serializer "0" + domelementtype "1" + +domutils@^1.5.1: + version "1.7.0" + resolved "https://registry.yarnpkg.com/domutils/-/domutils-1.7.0.tgz#56ea341e834e06e6748af7a1cb25da67ea9f8c2a" + integrity sha512-Lgd2XcJ/NjEw+7tFvfKxOzCYKZsdct5lczQ2ZaQY8Djz7pfAD3Gbp8ySJWtreII/vDlMVmxwa6pHmdxIYgttDg== + dependencies: + dom-serializer "0" + domelementtype "1" + +domutils@^2.4.3, domutils@^2.4.4: + version "2.4.4" + resolved "https://registry.yarnpkg.com/domutils/-/domutils-2.4.4.tgz#282739c4b150d022d34699797369aad8d19bbbd3" + integrity sha512-jBC0vOsECI4OMdD0GC9mGn7NXPLb+Qt6KW1YDQzeQYRUFKmNG8lh7mO5HiELfr+lLQE7loDVI4QcAxV80HS+RA== + dependencies: + dom-serializer "^1.0.1" + domelementtype "^2.0.1" + domhandler "^4.0.0" + +dot-case@^2.1.0: + version "2.1.1" + resolved "https://registry.yarnpkg.com/dot-case/-/dot-case-2.1.1.tgz#34dcf37f50a8e93c2b3bca8bb7fb9155c7da3bee" + integrity sha1-NNzzf1Co6TwrO8qLt/uRVcfaO+4= + dependencies: + no-case "^2.2.0" + +dot-prop@^4.1.0: + version "4.2.1" + resolved "https://registry.yarnpkg.com/dot-prop/-/dot-prop-4.2.1.tgz#45884194a71fc2cda71cbb4bceb3a4dd2f433ba4" + integrity sha512-l0p4+mIuJIua0mhxGoh4a+iNL9bmeK5DvnSVQa6T0OhrVmaEa1XScX5Etc673FePCJOArq/4Pa2cLGODUWTPOQ== + dependencies: + is-obj "^1.0.0" + +double-ended-queue@2.1.0-0: + version "2.1.0-0" + resolved "https://registry.yarnpkg.com/double-ended-queue/-/double-ended-queue-2.1.0-0.tgz#103d3527fd31528f40188130c841efdd78264e5c" + integrity sha1-ED01J/0xUo9AGIEwyEHv3XgmTlw= + +drbg.js@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/drbg.js/-/drbg.js-1.0.1.tgz#3e36b6c42b37043823cdbc332d58f31e2445480b" + integrity sha512-F4wZ06PvqxYLFEZKkFxTDcns9oFNk34hvmJSEwdzsxVQ8YI5YaxtACgQatkYgv2VI2CFkUd2Y+xosPQnHv809g== + dependencies: + browserify-aes "^1.0.6" + create-hash "^1.1.2" + create-hmac "^1.1.4" + +duplexer3@^0.1.4: + version "0.1.4" + resolved "https://registry.yarnpkg.com/duplexer3/-/duplexer3-0.1.4.tgz#ee01dd1cac0ed3cbc7fdbea37dc0a8f1ce002ce2" + integrity sha1-7gHdHKwO08vH/b6jfcCo8c4ALOI= + +duplexify@^3.2.0: + version "3.7.1" + resolved "https://registry.yarnpkg.com/duplexify/-/duplexify-3.7.1.tgz#2a4df5317f6ccfd91f86d6fd25d8d8a103b88309" + integrity sha512-07z8uv2wMyS51kKhD1KsdXJg5WQ6t93RneqRxUHnskXVtlYYkLqM0gqStQZ3pj073g687jPCHrqNfCzawLYh5g== + dependencies: + end-of-stream "^1.0.0" + inherits "^2.0.1" + readable-stream "^2.0.0" + stream-shift "^1.0.0" + +ecc-jsbn@~0.1.1: + version "0.1.2" + resolved "https://registry.yarnpkg.com/ecc-jsbn/-/ecc-jsbn-0.1.2.tgz#3a83a904e54353287874c564b7549386849a98c9" + integrity sha512-eh9O+hwRHNbG4BLTjEl3nw044CkGm5X6LoaCf7LPp7UU8Qrt47JYNi6nPX8xjW97TKGKm1ouctg0QSpZe9qrnw== + dependencies: + jsbn "~0.1.0" + safer-buffer "^2.1.0" + +ee-first@1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/ee-first/-/ee-first-1.1.1.tgz#590c61156b0ae2f4f0255732a158b266bc56b21d" + integrity sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow== + +ejs@^2.6.1: + version "2.7.4" + resolved "https://registry.yarnpkg.com/ejs/-/ejs-2.7.4.tgz#48661287573dcc53e366c7a1ae52c3a120eec9ba" + integrity sha512-7vmuyh5+kuUyJKePhQfRQBhXV5Ce+RnaeeQArKu1EAMpL3WbgMt5WG6uQZpEVvYSSsxMXRKOewtDk9RaTKXRlA== + +electron-to-chromium@^1.3.649: + version "1.3.683" + resolved 
"https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.3.683.tgz#2c9ab53ff5275cf3dd49278af714d0f8975204f7" + integrity sha512-8mFfiAesXdEdE0DhkMKO7W9U6VU/9T3VTWwZ+4g84/YMP4kgwgFtQgUxuu7FUMcvSeKSNhFQNU+WZ68BQTLT5A== + +elliptic@6.5.3: + version "6.5.3" + resolved "https://registry.yarnpkg.com/elliptic/-/elliptic-6.5.3.tgz#cb59eb2efdaf73a0bd78ccd7015a62ad6e0f93d6" + integrity sha512-IMqzv5wNQf+E6aHeIqATs0tOLeOTwj1QKbRcS3jBbYkl5oLAserA8yJTT7/VyHUYG91PRmPyeQDObKLPpeS4dw== + dependencies: + bn.js "^4.4.0" + brorand "^1.0.1" + hash.js "^1.0.0" + hmac-drbg "^1.0.0" + inherits "^2.0.1" + minimalistic-assert "^1.0.0" + minimalistic-crypto-utils "^1.0.0" + +elliptic@6.5.4, elliptic@^6.4.0, elliptic@^6.5.2, elliptic@^6.5.3, elliptic@^6.5.4: + version "6.5.4" + resolved "https://registry.yarnpkg.com/elliptic/-/elliptic-6.5.4.tgz#da37cebd31e79a1367e941b592ed1fbebd58abbb" + integrity sha512-iLhC6ULemrljPZb+QutR5TQGB+pdW6KGD5RSegS+8sorOZT+rdQFbsQFJgvN3eRqNALqJer4oQ16YvJHlU8hzQ== + dependencies: + bn.js "^4.11.9" + brorand "^1.1.0" + hash.js "^1.0.0" + hmac-drbg "^1.0.1" + inherits "^2.0.4" + minimalistic-assert "^1.0.1" + minimalistic-crypto-utils "^1.0.1" + +emittery@^0.4.1: + version "0.4.1" + resolved "https://registry.yarnpkg.com/emittery/-/emittery-0.4.1.tgz#abe9d3297389ba424ac87e53d1c701962ce7433d" + integrity sha512-r4eRSeStEGf6M5SKdrQhhLK5bOwOBxQhIE3YSTnZE3GpKiLfnnhE+tPtrJE79+eDJgm39BM6LSoI8SCx4HbwlQ== + +emoji-regex@^7.0.1: + version "7.0.3" + resolved "https://registry.yarnpkg.com/emoji-regex/-/emoji-regex-7.0.3.tgz#933a04052860c85e83c122479c4748a8e4c72156" + integrity sha512-CwBLREIQ7LvYFB0WyRvwhq5N5qPhc6PMjD6bYggFlI5YyDgl+0vxq5VHbMOFqLg7hfWzmu8T5Z1QofhmTIhItA== + +emoji-regex@^8.0.0: + version "8.0.0" + resolved "https://registry.yarnpkg.com/emoji-regex/-/emoji-regex-8.0.0.tgz#e818fd69ce5ccfcb404594f842963bf53164cc37" + integrity sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A== + +encodeurl@~1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/encodeurl/-/encodeurl-1.0.2.tgz#ad3ff4c86ec2d029322f5a02c3a9a606c95b3f59" + integrity sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w== + +encoding-down@^6.3.0: + version "6.3.0" + resolved "https://registry.yarnpkg.com/encoding-down/-/encoding-down-6.3.0.tgz#b1c4eb0e1728c146ecaef8e32963c549e76d082b" + integrity sha512-QKrV0iKR6MZVJV08QY0wp1e7vF6QbhnbQhb07bwpEyuz4uZiZgPlEGdkCROuFkUwdxlFaiPIhjyarH1ee/3vhw== + dependencies: + abstract-leveldown "^6.2.1" + inherits "^2.0.3" + level-codec "^9.0.0" + level-errors "^2.0.0" + +encoding@^0.1.11: + version "0.1.13" + resolved "https://registry.yarnpkg.com/encoding/-/encoding-0.1.13.tgz#56574afdd791f54a8e9b2785c0582a2d26210fa9" + integrity sha512-ETBauow1T35Y/WZMkio9jiM0Z5xjHHmJ4XmjZOq1l/dXz3lr2sRn87nJy20RupqSh1F2m3HHPSp8ShIPQJrJ3A== + dependencies: + iconv-lite "^0.6.2" + +end-of-stream@^1.0.0, end-of-stream@^1.1.0, end-of-stream@^1.4.1: + version "1.4.4" + resolved "https://registry.yarnpkg.com/end-of-stream/-/end-of-stream-1.4.4.tgz#5ae64a5f45057baf3626ec14da0ca5e4b2431eb0" + integrity sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q== + dependencies: + once "^1.4.0" + +end-stream@~0.1.0: + version "0.1.0" + resolved "https://registry.yarnpkg.com/end-stream/-/end-stream-0.1.0.tgz#32003f3f438a2b0143168137f8fa6e9866c81ed5" + integrity sha1-MgA/P0OKKwFDFoE3+PpumGbIHtU= + dependencies: + write-stream "~0.4.3" + +enquirer@2.3.4: + version "2.3.4" 
+ resolved "https://registry.yarnpkg.com/enquirer/-/enquirer-2.3.4.tgz#c608f2e1134c7f68c1c9ee056de13f9b31076de9" + integrity sha512-pkYrrDZumL2VS6VBGDhqbajCM2xpkUNLuKfGPjfKaSIBKYopQbqEFyrOkRMIb2HDR/rO1kGhEt/5twBwtzKBXw== + dependencies: + ansi-colors "^3.2.1" + +entities@1.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/entities/-/entities-1.0.0.tgz#b2987aa3821347fcde642b24fdfc9e4fb712bf26" + integrity sha1-sph6o4ITR/zeZCsk/fyeT7cSvyY= + +entities@^1.1.1, entities@~1.1.1: + version "1.1.2" + resolved "https://registry.yarnpkg.com/entities/-/entities-1.1.2.tgz#bdfa735299664dfafd34529ed4f8522a275fea56" + integrity sha512-f2LZMYl1Fzu7YSBKg+RoROelpOaNrcGmE9AZubeDfrCEia483oW4MI4VyFd5VNHIgQ/7qm1I0wUHK1eJnn2y2w== + +entities@^2.0.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/entities/-/entities-2.2.0.tgz#098dc90ebb83d8dffa089d55256b351d34c4da55" + integrity sha512-p92if5Nz619I0w+akJrLZH0MX0Pb5DX39XOwQTtXSdQQOaYH03S1uIQp4mhOZtAXrxq4ViO67YTiLBo2638o9A== + +entities@~2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/entities/-/entities-2.1.0.tgz#992d3129cf7df6870b96c57858c249a120f8b8b5" + integrity sha512-hCx1oky9PFrJ611mf0ifBLBRW8lUUVRlFolb5gWRfIELabBlbp9xZvrqZLZAs+NxFnbfQoeGd8wDkygjg7U85w== + +err-code@^1.1.2: + version "1.1.2" + resolved "https://registry.yarnpkg.com/err-code/-/err-code-1.1.2.tgz#06e0116d3028f6aef4806849eb0ea6a748ae6960" + integrity sha512-CJAN+O0/yA1CKfRn9SXOGctSpEM7DCon/r/5r2eXFMY2zCCJBasFhcM5I+1kh3Ap11FsQCX+vGHceNPvpWKhoA== + +err-code@^2.0.0: + version "2.0.3" + resolved "https://registry.yarnpkg.com/err-code/-/err-code-2.0.3.tgz#23c2f3b756ffdfc608d30e27c9a941024807e7f9" + integrity sha512-2bmlRpNKBxT/CRmPOlyISQpNj+qSeYvcym/uT0Jx2bMOlKLtSy1ZmLuVxSEKKyor/N5yhvp/ZiG1oE3DEYMSFA== + +errno@~0.1.1: + version "0.1.8" + resolved "https://registry.yarnpkg.com/errno/-/errno-0.1.8.tgz#8bb3e9c7d463be4976ff888f76b4809ebc2e811f" + integrity sha512-dJ6oBr5SQ1VSd9qkk7ByRgb/1SH4JZjCHSW/mr63/QcXO9zLVxvJ6Oy13nio03rxpSnVDDjFor75SjVeZWPW/A== + dependencies: + prr "~1.0.1" + +error-ex@^1.2.0, error-ex@^1.3.1: + version "1.3.2" + resolved "https://registry.yarnpkg.com/error-ex/-/error-ex-1.3.2.tgz#b4ac40648107fdcdcfae242f428bea8a14d4f1bf" + integrity sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g== + dependencies: + is-arrayish "^0.2.1" + +es-abstract@^1.17.0-next.1, es-abstract@^1.18.0-next.1, es-abstract@^1.18.0-next.2: + version "1.18.0" + resolved "https://registry.yarnpkg.com/es-abstract/-/es-abstract-1.18.0.tgz#ab80b359eecb7ede4c298000390bc5ac3ec7b5a4" + integrity sha512-LJzK7MrQa8TS0ja2w3YNLzUgJCGPdPOV1yVvezjNnS89D+VR08+Szt2mz3YB2Dck/+w5tfIq/RoUAFqJJGM2yw== + dependencies: + call-bind "^1.0.2" + es-to-primitive "^1.2.1" + function-bind "^1.1.1" + get-intrinsic "^1.1.1" + has "^1.0.3" + has-symbols "^1.0.2" + is-callable "^1.2.3" + is-negative-zero "^2.0.1" + is-regex "^1.1.2" + is-string "^1.0.5" + object-inspect "^1.9.0" + object-keys "^1.1.1" + object.assign "^4.1.2" + string.prototype.trimend "^1.0.4" + string.prototype.trimstart "^1.0.4" + unbox-primitive "^1.0.0" + +es-abstract@^1.19.1: + version "1.19.1" + resolved "https://registry.yarnpkg.com/es-abstract/-/es-abstract-1.19.1.tgz#d4885796876916959de78edaa0df456627115ec3" + integrity sha512-2vJ6tjA/UfqLm2MPs7jxVybLoB8i1t1Jd9R3kISld20sIxPcTbLuggQOUxeWeAvIUkduv/CfMjuh4WmiXr2v9w== + dependencies: + call-bind "^1.0.2" + es-to-primitive "^1.2.1" + function-bind "^1.1.1" + get-intrinsic "^1.1.1" + get-symbol-description "^1.0.0" + has "^1.0.3" + 
has-symbols "^1.0.2" + internal-slot "^1.0.3" + is-callable "^1.2.4" + is-negative-zero "^2.0.1" + is-regex "^1.1.4" + is-shared-array-buffer "^1.0.1" + is-string "^1.0.7" + is-weakref "^1.0.1" + object-inspect "^1.11.0" + object-keys "^1.1.1" + object.assign "^4.1.2" + string.prototype.trimend "^1.0.4" + string.prototype.trimstart "^1.0.4" + unbox-primitive "^1.0.1" + +es-array-method-boxes-properly@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/es-array-method-boxes-properly/-/es-array-method-boxes-properly-1.0.0.tgz#873f3e84418de4ee19c5be752990b2e44718d09e" + integrity sha512-wd6JXUmyHmt8T5a2xreUwKcGPq6f1f+WwIJkijUqiGcJz1qqnZgP6XIK+QyIWU5lT7imeNxUll48bziG+TSYcA== + +es-get-iterator@^1.0.2: + version "1.1.2" + resolved "https://registry.yarnpkg.com/es-get-iterator/-/es-get-iterator-1.1.2.tgz#9234c54aba713486d7ebde0220864af5e2b283f7" + integrity sha512-+DTO8GYwbMCwbywjimwZMHp8AuYXOS2JZFWoi2AlPOS3ebnII9w/NLpNZtA7A0YLaVDw+O7KFCeoIV7OPvM7hQ== + dependencies: + call-bind "^1.0.2" + get-intrinsic "^1.1.0" + has-symbols "^1.0.1" + is-arguments "^1.1.0" + is-map "^2.0.2" + is-set "^2.0.2" + is-string "^1.0.5" + isarray "^2.0.5" + +es-to-primitive@^1.2.1: + version "1.2.1" + resolved "https://registry.yarnpkg.com/es-to-primitive/-/es-to-primitive-1.2.1.tgz#e55cd4c9cdc188bcefb03b366c736323fc5c898a" + integrity sha512-QCOllgZJtaUo9miYBcLChTUaHNjJF3PYs1VidD7AwiEj1kYxKeQTctLAezAOH5ZKRH0g2IgPn6KwB4IT8iRpvA== + dependencies: + is-callable "^1.1.4" + is-date-object "^1.0.1" + is-symbol "^1.0.2" + +es5-ext@^0.10.35, es5-ext@^0.10.50: + version "0.10.53" + resolved "https://registry.yarnpkg.com/es5-ext/-/es5-ext-0.10.53.tgz#93c5a3acfdbef275220ad72644ad02ee18368de1" + integrity sha512-Xs2Stw6NiNHWypzRTY1MtaG/uJlwCk8kH81920ma8mvN8Xq1gsfhZvpkImLQArw8AHnv8MT2I45J3c0R8slE+Q== + dependencies: + es6-iterator "~2.0.3" + es6-symbol "~3.1.3" + next-tick "~1.0.0" + +es6-denodeify@^0.1.1: + version "0.1.5" + resolved "https://registry.yarnpkg.com/es6-denodeify/-/es6-denodeify-0.1.5.tgz#31d4d5fe9c5503e125460439310e16a2a3f39c1f" + integrity sha1-MdTV/pxVA+ElRgQ5MQ4WoqPznB8= + +es6-iterator@~2.0.3: + version "2.0.3" + resolved "https://registry.yarnpkg.com/es6-iterator/-/es6-iterator-2.0.3.tgz#a7de889141a05a94b0854403b2d0a0fbfa98f3b7" + integrity sha1-p96IkUGgWpSwhUQDstCg+/qY87c= + dependencies: + d "1" + es5-ext "^0.10.35" + es6-symbol "^3.1.1" + +es6-promise@^4.0.3: + version "4.2.8" + resolved "https://registry.yarnpkg.com/es6-promise/-/es6-promise-4.2.8.tgz#4eb21594c972bc40553d276e510539143db53e0a" + integrity sha512-HJDGx5daxeIvxdBxvG2cb9g4tEvwIk3i8+nhX0yGrYmZUzbkdg8QbDevheDB8gd0//uPj4c1EQua8Q+MViT0/w== + +es6-promisify@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/es6-promisify/-/es6-promisify-5.0.0.tgz#5109d62f3e56ea967c4b63505aef08291c8a5203" + integrity sha512-C+d6UdsYDk0lMebHNR4S2NybQMMngAOnOwYBQjTOiv0MkoJMP0Myw2mgpDLBcpfCmRLxyFqYhS/CfOENq4SJhQ== + dependencies: + es6-promise "^4.0.3" + +es6-symbol@^3.1.1, es6-symbol@~3.1.3: + version "3.1.3" + resolved "https://registry.yarnpkg.com/es6-symbol/-/es6-symbol-3.1.3.tgz#bad5d3c1bcdac28269f4cb331e431c78ac705d18" + integrity sha512-NJ6Yn3FuDinBaBRWl/q5X/s4koRHBrgKAu+yGI6JCBeiu3qrcbJhwT2GeR/EXVfylRk8dpQVJoLEFhK+Mu31NA== + dependencies: + d "^1.0.1" + ext "^1.1.2" + +escalade@^3.1.1: + version "3.1.1" + resolved "https://registry.yarnpkg.com/escalade/-/escalade-3.1.1.tgz#d8cfdc7000965c5a0174b4a82eaa5c0552742e40" + integrity sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw== + 
+escape-html@1.0.3, escape-html@~1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/escape-html/-/escape-html-1.0.3.tgz#0258eae4d3d0c0974de1c169188ef0051d1d1988" + integrity sha1-Aljq5NPQwJdN4cFpGI7wBR0dGYg= + +escape-string-regexp@4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz#14ba83a5d373e3d311e5afca29cf5bfad965bf34" + integrity sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA== + +escape-string-regexp@^1.0.2, escape-string-regexp@^1.0.5: + version "1.0.5" + resolved "https://registry.yarnpkg.com/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz#1b61c0562190a8dff6ae3bb2cf0200ca130b86d4" + integrity sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ= + +escodegen@^1.6.1: + version "1.14.3" + resolved "https://registry.yarnpkg.com/escodegen/-/escodegen-1.14.3.tgz#4e7b81fba61581dc97582ed78cab7f0e8d63f503" + integrity sha512-qFcX0XJkdg+PB3xjZZG/wKSuT1PnQWx57+TVSjIMmILd2yC/6ByYElPwJnslDsuWuSAp4AwJGumarAAmJch5Kw== + dependencies: + esprima "^4.0.1" + estraverse "^4.2.0" + esutils "^2.0.2" + optionator "^0.8.1" + optionalDependencies: + source-map "~0.6.1" + +esdoc@^1.0.4: + version "1.1.0" + resolved "https://registry.yarnpkg.com/esdoc/-/esdoc-1.1.0.tgz#07d40ebf791764cd537929c29111e20a857624f3" + integrity sha512-vsUcp52XJkOWg9m1vDYplGZN2iDzvmjDL5M/Mp8qkoDG3p2s0yIQCIjKR5wfPBaM3eV14a6zhQNYiNTCVzPnxA== + dependencies: + babel-generator "6.26.1" + babel-traverse "6.26.0" + babylon "6.18.0" + cheerio "1.0.0-rc.2" + color-logger "0.0.6" + escape-html "1.0.3" + fs-extra "5.0.0" + ice-cap "0.0.4" + marked "0.3.19" + minimist "1.2.0" + taffydb "2.7.3" + +esprima@^4.0.0, esprima@^4.0.1: + version "4.0.1" + resolved "https://registry.yarnpkg.com/esprima/-/esprima-4.0.1.tgz#13b04cdb3e6c5d19df91ab6987a8695619b0aa71" + integrity sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A== + +estraverse@^4.2.0: + version "4.3.0" + resolved "https://registry.yarnpkg.com/estraverse/-/estraverse-4.3.0.tgz#398ad3f3c5a24948be7725e83d11a7de28cdbd1d" + integrity sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw== + +esutils@^2.0.2: + version "2.0.3" + resolved "https://registry.yarnpkg.com/esutils/-/esutils-2.0.3.tgz#74d2eb4de0b8da1293711910d50775b9b710ef64" + integrity sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g== + +etag@~1.8.1: + version "1.8.1" + resolved "https://registry.yarnpkg.com/etag/-/etag-1.8.1.tgz#41ae2eeb65efa62268aebfea83ac7d79299b0887" + integrity sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg== + +eth-block-tracker@^4.4.2: + version "4.4.3" + resolved "https://registry.yarnpkg.com/eth-block-tracker/-/eth-block-tracker-4.4.3.tgz#766a0a0eb4a52c867a28328e9ae21353812cf626" + integrity sha512-A8tG4Z4iNg4mw5tP1Vung9N9IjgMNqpiMoJ/FouSFwNCGHv2X0mmOYwtQOJzki6XN7r7Tyo01S29p7b224I4jw== + dependencies: + "@babel/plugin-transform-runtime" "^7.5.5" + "@babel/runtime" "^7.5.5" + eth-query "^2.1.0" + json-rpc-random-id "^1.0.1" + pify "^3.0.0" + safe-event-emitter "^1.0.1" + +eth-ens-namehash@2.0.8, eth-ens-namehash@^2.0.0: + version "2.0.8" + resolved "https://registry.yarnpkg.com/eth-ens-namehash/-/eth-ens-namehash-2.0.8.tgz#229ac46eca86d52e0c991e7cb2aef83ff0f68bcf" + integrity sha1-IprEbsqG1S4MmR58sq74P/D2i88= + dependencies: + idna-uts46-hx "^2.3.1" + js-sha3 "^0.5.7" + +eth-json-rpc-errors@^1.0.1: + version "1.1.1" + 
resolved "https://registry.yarnpkg.com/eth-json-rpc-errors/-/eth-json-rpc-errors-1.1.1.tgz#148377ef55155585981c21ff574a8937f9d6991f" + integrity sha512-WT5shJ5KfNqHi9jOZD+ID8I1kuYWNrigtZat7GOQkvwo99f8SzAVaEcWhJUv656WiZOAg3P1RiJQANtUmDmbIg== + dependencies: + fast-safe-stringify "^2.0.6" + +eth-json-rpc-errors@^2.0.2: + version "2.0.2" + resolved "https://registry.yarnpkg.com/eth-json-rpc-errors/-/eth-json-rpc-errors-2.0.2.tgz#c1965de0301fe941c058e928bebaba2e1285e3c4" + integrity sha512-uBCRM2w2ewusRHGxN8JhcuOb2RN3ueAOYH/0BhqdFmQkZx5lj5+fLKTz0mIVOzd4FG5/kUksCzCD7eTEim6gaA== + dependencies: + fast-safe-stringify "^2.0.6" + +eth-lib@0.2.7: + version "0.2.7" + resolved "https://registry.yarnpkg.com/eth-lib/-/eth-lib-0.2.7.tgz#2f93f17b1e23aec3759cd4a3fe20c1286a3fc1ca" + integrity sha1-L5Pxex4jrsN1nNSj/iDBKGo/wco= + dependencies: + bn.js "^4.11.6" + elliptic "^6.4.0" + xhr-request-promise "^0.1.2" + +eth-lib@0.2.8, eth-lib@^0.2.8: + version "0.2.8" + resolved "https://registry.yarnpkg.com/eth-lib/-/eth-lib-0.2.8.tgz#b194058bef4b220ad12ea497431d6cb6aa0623c8" + integrity sha512-ArJ7x1WcWOlSpzdoTBX8vkwlkSQ85CjjifSZtV4co64vWxSV8geWfPI9x4SVYu3DSxnX4yWFVTtGL+j9DUFLNw== + dependencies: + bn.js "^4.11.6" + elliptic "^6.4.0" + xhr-request-promise "^0.1.2" + +eth-lib@^0.1.26: + version "0.1.29" + resolved "https://registry.yarnpkg.com/eth-lib/-/eth-lib-0.1.29.tgz#0c11f5060d42da9f931eab6199084734f4dbd1d9" + integrity sha512-bfttrr3/7gG4E02HoWTDUcDDslN003OlOoBxk9virpAZQ1ja/jDgwkWB8QfJF7ojuEowrqy+lzp9VcJG7/k5bQ== + dependencies: + bn.js "^4.11.6" + elliptic "^6.4.0" + nano-json-stream-parser "^0.1.2" + servify "^0.1.12" + ws "^3.0.0" + xhr-request-promise "^0.1.2" + +eth-query@^2.1.0, eth-query@^2.1.2: + version "2.1.2" + resolved "https://registry.yarnpkg.com/eth-query/-/eth-query-2.1.2.tgz#d6741d9000106b51510c72db92d6365456a6da5e" + integrity sha1-1nQdkAAQa1FRDHLbktY2VFam2l4= + dependencies: + json-rpc-random-id "^1.0.0" + xtend "^4.0.1" + +eth-rpc-errors@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/eth-rpc-errors/-/eth-rpc-errors-3.0.0.tgz#d7b22653c70dbf9defd4ef490fd08fe70608ca10" + integrity sha512-iPPNHPrLwUlR9xCSYm7HHQjWBasor3+KZfRvwEWxMz3ca0yqnlBeJrnyphkGIXZ4J7AMAaOLmwy4AWhnxOiLxg== + dependencies: + fast-safe-stringify "^2.0.6" + +ethereum-bloom-filters@^1.0.6: + version "1.0.10" + resolved "https://registry.yarnpkg.com/ethereum-bloom-filters/-/ethereum-bloom-filters-1.0.10.tgz#3ca07f4aed698e75bd134584850260246a5fed8a" + integrity sha512-rxJ5OFN3RwjQxDcFP2Z5+Q9ho4eIdEmSc2ht0fCu8Se9nbXjZ7/031uXoUYJ87KHCOdVeiUuwSnoS7hmYAGVHA== + dependencies: + js-sha3 "^0.8.0" + +ethereum-common@0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/ethereum-common/-/ethereum-common-0.2.0.tgz#13bf966131cce1eeade62a1b434249bb4cb120ca" + integrity sha512-XOnAR/3rntJgbCdGhqdaLIxDLWKLmsZOGhHdBKadEr6gEnJLH52k93Ou+TUdFaPN3hJc3isBZBal3U/XZ15abA== + +ethereum-common@^0.0.18: + version "0.0.18" + resolved "https://registry.yarnpkg.com/ethereum-common/-/ethereum-common-0.0.18.tgz#2fdc3576f232903358976eb39da783213ff9523f" + integrity sha1-L9w1dvIykDNYl26znaeDIT/5Uj8= + +ethereum-cryptography@^0.1.3: + version "0.1.3" + resolved "https://registry.yarnpkg.com/ethereum-cryptography/-/ethereum-cryptography-0.1.3.tgz#8d6143cfc3d74bf79bbd8edecdf29e4ae20dd191" + integrity sha512-w8/4x1SGGzc+tO97TASLja6SLd3fRIK2tLVcV2Gx4IB21hE19atll5Cq9o3d0ZmAYC/8aw0ipieTSiekAea4SQ== + dependencies: + "@types/pbkdf2" "^3.0.0" + "@types/secp256k1" "^4.0.1" + blakejs "^1.1.0" + browserify-aes "^1.2.0" + bs58check "^2.1.2" 
+ create-hash "^1.2.0" + create-hmac "^1.1.7" + hash.js "^1.1.7" + keccak "^3.0.0" + pbkdf2 "^3.0.17" + randombytes "^2.1.0" + safe-buffer "^5.1.2" + scrypt-js "^3.0.0" + secp256k1 "^4.0.1" + setimmediate "^1.0.5" + +ethereum-ens@^0.8.0: + version "0.8.0" + resolved "https://registry.yarnpkg.com/ethereum-ens/-/ethereum-ens-0.8.0.tgz#6d0f79acaa61fdbc87d2821779c4e550243d4c57" + integrity sha512-a8cBTF4AWw1Q1Y37V1LSCS9pRY4Mh3f8vCg5cbXCCEJ3eno1hbI/+Ccv9SZLISYpqQhaglP3Bxb/34lS4Qf7Bg== + dependencies: + bluebird "^3.4.7" + eth-ens-namehash "^2.0.0" + js-sha3 "^0.5.7" + pako "^1.0.4" + underscore "^1.8.3" + web3 "^1.0.0-beta.34" + +ethereum-protocol@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/ethereum-protocol/-/ethereum-protocol-1.0.1.tgz#b7d68142f4105e0ae7b5e178cf42f8d4dc4b93cf" + integrity sha512-3KLX1mHuEsBW0dKG+c6EOJS1NBNqdCICvZW9sInmZTt5aY0oxmHVggYRE0lJu1tcnMD1K+AKHdLi6U43Awm1Vg== + +ethereumjs-abi@^0.6.8: + version "0.6.8" + resolved "https://registry.yarnpkg.com/ethereumjs-abi/-/ethereumjs-abi-0.6.8.tgz#71bc152db099f70e62f108b7cdfca1b362c6fcae" + integrity sha512-Tx0r/iXI6r+lRsdvkFDlut0N08jWMnKRZ6Gkq+Nmw75lZe4e6o3EkSnkaBP5NF6+m5PTGAr9JP43N3LyeoglsA== + dependencies: + bn.js "^4.11.8" + ethereumjs-util "^6.0.0" + +ethereumjs-account@^2.0.3: + version "2.0.5" + resolved "https://registry.yarnpkg.com/ethereumjs-account/-/ethereumjs-account-2.0.5.tgz#eeafc62de544cb07b0ee44b10f572c9c49e00a84" + integrity sha512-bgDojnXGjhMwo6eXQC0bY6UK2liSFUSMwwylOmQvZbSl/D7NXQ3+vrGO46ZeOgjGfxXmgIeVNDIiHw7fNZM4VA== + dependencies: + ethereumjs-util "^5.0.0" + rlp "^2.0.0" + safe-buffer "^5.1.1" + +ethereumjs-block@^1.2.2, ethereumjs-block@^1.6.0: + version "1.7.1" + resolved "https://registry.yarnpkg.com/ethereumjs-block/-/ethereumjs-block-1.7.1.tgz#78b88e6cc56de29a6b4884ee75379b6860333c3f" + integrity sha512-B+sSdtqm78fmKkBq78/QLKJbu/4Ts4P2KFISdgcuZUPDm9x+N7qgBPIIFUGbaakQh8bzuquiRVbdmvPKqbILRg== + dependencies: + async "^2.0.1" + ethereum-common "0.2.0" + ethereumjs-tx "^1.2.2" + ethereumjs-util "^5.0.0" + merkle-patricia-tree "^2.1.2" + +ethereumjs-block@~2.2.0: + version "2.2.2" + resolved "https://registry.yarnpkg.com/ethereumjs-block/-/ethereumjs-block-2.2.2.tgz#c7654be7e22df489fda206139ecd63e2e9c04965" + integrity sha512-2p49ifhek3h2zeg/+da6XpdFR3GlqY3BIEiqxGF8j9aSRIgkb7M1Ky+yULBKJOu8PAZxfhsYA+HxUk2aCQp3vg== + dependencies: + async "^2.0.1" + ethereumjs-common "^1.5.0" + ethereumjs-tx "^2.1.1" + ethereumjs-util "^5.0.0" + merkle-patricia-tree "^2.1.2" + +ethereumjs-common@^1.1.0, ethereumjs-common@^1.3.2, ethereumjs-common@^1.5.0: + version "1.5.2" + resolved "https://registry.yarnpkg.com/ethereumjs-common/-/ethereumjs-common-1.5.2.tgz#2065dbe9214e850f2e955a80e650cb6999066979" + integrity sha512-hTfZjwGX52GS2jcVO6E2sx4YuFnf0Fhp5ylo4pEPhEffNln7vS59Hr5sLnp3/QCazFLluuBZ+FZ6J5HTp0EqCA== + +ethereumjs-tx@^1.0.0, ethereumjs-tx@^1.2.0, ethereumjs-tx@^1.2.2, ethereumjs-tx@^1.3.7: + version "1.3.7" + resolved "https://registry.yarnpkg.com/ethereumjs-tx/-/ethereumjs-tx-1.3.7.tgz#88323a2d875b10549b8347e09f4862b546f3d89a" + integrity sha512-wvLMxzt1RPhAQ9Yi3/HKZTn0FZYpnsmQdbKYfUUpi4j1SEIcbkd9tndVjcPrufY3V7j2IebOpC00Zp2P/Ay2kA== + dependencies: + ethereum-common "^0.0.18" + ethereumjs-util "^5.0.0" + +ethereumjs-tx@^2.1.1: + version "2.1.2" + resolved "https://registry.yarnpkg.com/ethereumjs-tx/-/ethereumjs-tx-2.1.2.tgz#5dfe7688bf177b45c9a23f86cf9104d47ea35fed" + integrity sha512-zZEK1onCeiORb0wyCXUvg94Ve5It/K6GD1K+26KfFKodiBiS6d9lfCXlUKGBBdQ+bv7Day+JK0tj1K+BeNFRAw== + dependencies: + 
ethereumjs-common "^1.5.0" + ethereumjs-util "^6.0.0" + +ethereumjs-util@^5.0.0, ethereumjs-util@^5.1.1, ethereumjs-util@^5.1.2, ethereumjs-util@^5.1.5: + version "5.2.1" + resolved "https://registry.yarnpkg.com/ethereumjs-util/-/ethereumjs-util-5.2.1.tgz#a833f0e5fca7e5b361384dc76301a721f537bf65" + integrity sha512-v3kT+7zdyCm1HIqWlLNrHGqHGLpGYIhjeHxQjnDXjLT2FyGJDsd3LWMYUo7pAFRrk86CR3nUJfhC81CCoJNNGQ== + dependencies: + bn.js "^4.11.0" + create-hash "^1.1.2" + elliptic "^6.5.2" + ethereum-cryptography "^0.1.3" + ethjs-util "^0.1.3" + rlp "^2.0.0" + safe-buffer "^5.1.1" + +ethereumjs-util@^6.0.0, ethereumjs-util@^6.1.0: + version "6.2.1" + resolved "https://registry.yarnpkg.com/ethereumjs-util/-/ethereumjs-util-6.2.1.tgz#fcb4e4dd5ceacb9d2305426ab1a5cd93e3163b69" + integrity sha512-W2Ktez4L01Vexijrm5EB6w7dg4n/TgpoYU4avuT5T3Vmnw/eCRtiBrJfQYS/DCSvDIOLn2k57GcHdeBcgVxAqw== + dependencies: + "@types/bn.js" "^4.11.3" + bn.js "^4.11.0" + create-hash "^1.1.2" + elliptic "^6.5.2" + ethereum-cryptography "^0.1.3" + ethjs-util "0.1.6" + rlp "^2.2.3" + +ethereumjs-util@^7.0.2: + version "7.0.9" + resolved "https://registry.yarnpkg.com/ethereumjs-util/-/ethereumjs-util-7.0.9.tgz#2038baeb30f370a3e576ec175bd70bbbb6807d42" + integrity sha512-cRqvYYKJoitq6vMKMf8pXeVwvTrX+dRD0JwHaYqm8jvogK14tqIoCWH/KUHcRwnVxVXEYF/o6pup5jRG4V0xzg== + dependencies: + "@types/bn.js" "^5.1.0" + bn.js "^5.1.2" + create-hash "^1.1.2" + ethereum-cryptography "^0.1.3" + ethjs-util "0.1.6" + rlp "^2.2.4" + +ethereumjs-util@^7.1.0: + version "7.1.5" + resolved "https://registry.yarnpkg.com/ethereumjs-util/-/ethereumjs-util-7.1.5.tgz#9ecf04861e4fbbeed7465ece5f23317ad1129181" + integrity sha512-SDl5kKrQAudFBUe5OJM9Ac6WmMyYmXX/6sTmLZ3ffG2eY6ZIGBes3pEDxNN6V72WyOw4CPD5RomKdsa8DAAwLg== + dependencies: + "@types/bn.js" "^5.1.0" + bn.js "^5.1.2" + create-hash "^1.1.2" + ethereum-cryptography "^0.1.3" + rlp "^2.2.4" + +ethereumjs-vm@^2.3.4, ethereumjs-vm@^2.6.0: + version "2.6.0" + resolved "https://registry.yarnpkg.com/ethereumjs-vm/-/ethereumjs-vm-2.6.0.tgz#76243ed8de031b408793ac33907fb3407fe400c6" + integrity sha512-r/XIUik/ynGbxS3y+mvGnbOKnuLo40V5Mj1J25+HEO63aWYREIqvWeRO/hnROlMBE5WoniQmPmhiaN0ctiHaXw== + dependencies: + async "^2.1.2" + async-eventemitter "^0.2.2" + ethereumjs-account "^2.0.3" + ethereumjs-block "~2.2.0" + ethereumjs-common "^1.1.0" + ethereumjs-util "^6.0.0" + fake-merkle-patricia-tree "^1.0.1" + functional-red-black-tree "^1.0.1" + merkle-patricia-tree "^2.3.2" + rustbn.js "~0.2.0" + safe-buffer "^5.1.1" + +ethereumjs-wallet@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/ethereumjs-wallet/-/ethereumjs-wallet-1.0.1.tgz#664a4bcacfc1291ca2703de066df1178938dba1c" + integrity sha512-3Z5g1hG1das0JWU6cQ9HWWTY2nt9nXCcwj7eXVNAHKbo00XAZO8+NHlwdgXDWrL0SXVQMvTWN8Q/82DRH/JhPw== + dependencies: + aes-js "^3.1.1" + bs58check "^2.1.2" + ethereum-cryptography "^0.1.3" + ethereumjs-util "^7.0.2" + randombytes "^2.0.6" + scrypt-js "^3.0.1" + utf8 "^3.0.0" + uuid "^3.3.2" + +ethers@^4.0.32: + version "4.0.48" + resolved "https://registry.yarnpkg.com/ethers/-/ethers-4.0.48.tgz#330c65b8133e112b0613156e57e92d9009d8fbbe" + integrity sha512-sZD5K8H28dOrcidzx9f8KYh8083n5BexIO3+SbE4jK83L85FxtpXZBCQdXb8gkg+7sBqomcLhhkU7UHL+F7I2g== + dependencies: + aes-js "3.0.0" + bn.js "^4.4.0" + elliptic "6.5.3" + hash.js "1.1.3" + js-sha3 "0.5.7" + scrypt-js "2.0.4" + setimmediate "1.0.4" + uuid "2.0.1" + xmlhttprequest "1.8.0" + +ethjs-unit@0.1.6: + version "0.1.6" + resolved 
"https://registry.yarnpkg.com/ethjs-unit/-/ethjs-unit-0.1.6.tgz#c665921e476e87bce2a9d588a6fe0405b2c41699" + integrity sha512-/Sn9Y0oKl0uqQuvgFk/zQgR7aw1g36qX/jzSQ5lSwlO0GigPymk4eGQfeNTD03w1dPOqfz8V77Cy43jH56pagw== + dependencies: + bn.js "4.11.6" + number-to-bn "1.7.0" + +ethjs-util@0.1.6, ethjs-util@^0.1.3: + version "0.1.6" + resolved "https://registry.yarnpkg.com/ethjs-util/-/ethjs-util-0.1.6.tgz#f308b62f185f9fe6237132fb2a9818866a5cd536" + integrity sha512-CUnVOQq7gSpDHZVVrQW8ExxUETWrnrvXYvYz55wOU8Uj4VCgw56XC2B/fVqQN+f7gmrnRHSLVnFAwsCuNwji8w== + dependencies: + is-hex-prefixed "1.0.0" + strip-hex-prefix "1.0.0" + +event-target-shim@^5.0.0: + version "5.0.1" + resolved "https://registry.yarnpkg.com/event-target-shim/-/event-target-shim-5.0.1.tgz#5d4d3ebdf9583d63a5333ce2deb7480ab2b05789" + integrity sha512-i/2XbnSz/uxRCU6+NdVJgKWDTM427+MqYbkQzD321DuCQJUqOuJKIA0IM2+W2xtYHdKOmZ4dR6fExsd4SXL+WQ== + +eventemitter3@3.1.2, eventemitter3@^3.1.0: + version "3.1.2" + resolved "https://registry.yarnpkg.com/eventemitter3/-/eventemitter3-3.1.2.tgz#2d3d48f9c346698fce83a85d7d664e98535df6e7" + integrity sha512-tvtQIeLVHjDkJYnzf2dgVMxfuSGJeM/7UCG17TT4EumTfNtF+0nebF/4zWOIkCreAbtNqhGEboB6BWrwqNaw4Q== + +eventemitter3@4.0.4: + version "4.0.4" + resolved "https://registry.yarnpkg.com/eventemitter3/-/eventemitter3-4.0.4.tgz#b5463ace635a083d018bdc7c917b4c5f10a85384" + integrity sha512-rlaVLnVxtxvoyLsQQFBx53YmXHDxRIzzTLbdfxqi4yocpSjAxXwkU0cScM5JgSKMqEhrZpnvQ2D9gjylR0AimQ== + +eventemitter3@^4.0.0: + version "4.0.7" + resolved "https://registry.yarnpkg.com/eventemitter3/-/eventemitter3-4.0.7.tgz#2de9b68f6528d5644ef5c59526a1b4a07306169f" + integrity sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw== + +events@^3.0.0: + version "3.3.0" + resolved "https://registry.yarnpkg.com/events/-/events-3.3.0.tgz#31a95ad0a924e2d2c419a813aeb2c4e878ea7400" + integrity sha512-mQw+2fkQbALzQ7V0MY0IqdnXNOeTtP4r0lN9z7AAawCXgqea7bDii20AYrIBrFd/Hx0M2Ocz6S111CaFkUcb0Q== + +eventsource@1.0.7: + version "1.0.7" + resolved "https://registry.yarnpkg.com/eventsource/-/eventsource-1.0.7.tgz#8fbc72c93fcd34088090bc0a4e64f4b5cee6d8d0" + integrity sha512-4Ln17+vVT0k8aWq+t/bF5arcS3EpT9gYtW66EPacdj/mAFevznsnyoHLPy2BA8gbIQeIHoPsvwmfBftfcG//BQ== + dependencies: + original "^1.0.0" + +evp_bytestokey@^1.0.0, evp_bytestokey@^1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/evp_bytestokey/-/evp_bytestokey-1.0.3.tgz#7fcbdb198dc71959432efe13842684e0525acb02" + integrity sha512-/f2Go4TognH/KvCISP7OUsHn85hT9nUkxxA9BEWxFn+Oj9o8ZNLm/40hdlgSLyuOimsrTKLUMEorQexp/aPQeA== + dependencies: + md5.js "^1.3.4" + safe-buffer "^5.1.1" + +execa@^3.0.0: + version "3.4.0" + resolved "https://registry.yarnpkg.com/execa/-/execa-3.4.0.tgz#c08ed4550ef65d858fac269ffc8572446f37eb89" + integrity sha512-r9vdGQk4bmCuK1yKQu1KTwcT2zwfWdbdaXfCtAh+5nU/4fSX+JAb7vZGvI5naJrQlvONrEB20jeruESI69530g== + dependencies: + cross-spawn "^7.0.0" + get-stream "^5.0.0" + human-signals "^1.1.1" + is-stream "^2.0.0" + merge-stream "^2.0.0" + npm-run-path "^4.0.0" + onetime "^5.1.0" + p-finally "^2.0.0" + signal-exit "^3.0.2" + strip-final-newline "^2.0.0" + +expand-brackets@^0.1.4: + version "0.1.5" + resolved "https://registry.yarnpkg.com/expand-brackets/-/expand-brackets-0.1.5.tgz#df07284e342a807cd733ac5af72411e581d1177b" + integrity sha1-3wcoTjQqgHzXM6xa9yQR5YHRF3s= + dependencies: + is-posix-bracket "^0.1.0" + +expand-range@^1.8.1: + version "1.8.2" + resolved 
"https://registry.yarnpkg.com/expand-range/-/expand-range-1.8.2.tgz#a299effd335fe2721ebae8e257ec79644fc85337" + integrity sha1-opnv/TNf4nIeuujiV+x5ZE/IUzc= + dependencies: + fill-range "^2.1.0" + +explain-error@^1.0.4: + version "1.0.4" + resolved "https://registry.yarnpkg.com/explain-error/-/explain-error-1.0.4.tgz#a793d3ac0cad4c6ab571e9968fbbab6cb2532929" + integrity sha512-/wSgNMxFusiYRy1rd19LT2SQlIXDppHpumpWo06wxjflD1OYxDLbl6rMVw+U3bxD5Nuhex4TKqv9Aem4D0lVzQ== + +express@^4.0.0, express@^4.14.0, express@^4.17.1: + version "4.18.2" + resolved "https://registry.yarnpkg.com/express/-/express-4.18.2.tgz#3fabe08296e930c796c19e3c516979386ba9fd59" + integrity sha512-5/PsL6iGPdfQ/lKM1UuielYgv3BUoJfz1aUwU9vHZ+J7gyvwdQXFEBIEIaxeGf0GIcreATNyBExtalisDbuMqQ== + dependencies: + accepts "~1.3.8" + array-flatten "1.1.1" + body-parser "1.20.1" + content-disposition "0.5.4" + content-type "~1.0.4" + cookie "0.5.0" + cookie-signature "1.0.6" + debug "2.6.9" + depd "2.0.0" + encodeurl "~1.0.2" + escape-html "~1.0.3" + etag "~1.8.1" + finalhandler "1.2.0" + fresh "0.5.2" + http-errors "2.0.0" + merge-descriptors "1.0.1" + methods "~1.1.2" + on-finished "2.4.1" + parseurl "~1.3.3" + path-to-regexp "0.1.7" + proxy-addr "~2.0.7" + qs "6.11.0" + range-parser "~1.2.1" + safe-buffer "5.2.1" + send "0.18.0" + serve-static "1.15.0" + setprototypeof "1.2.0" + statuses "2.0.1" + type-is "~1.6.18" + utils-merge "1.0.1" + vary "~1.1.2" + +ext@^1.1.2: + version "1.4.0" + resolved "https://registry.yarnpkg.com/ext/-/ext-1.4.0.tgz#89ae7a07158f79d35517882904324077e4379244" + integrity sha512-Key5NIsUxdqKg3vIsdw9dSuXpPCQ297y6wBjL30edxwPgt2E44WcWBZey/ZvUc6sERLTxKdyCu4gZFmUbk1Q7A== + dependencies: + type "^2.0.0" + +extend-shallow@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/extend-shallow/-/extend-shallow-2.0.1.tgz#51af7d614ad9a9f610ea1bafbb989d6b1c56890f" + integrity sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8= + dependencies: + is-extendable "^0.1.0" + +extend@^3.0.0, extend@~3.0.2: + version "3.0.2" + resolved "https://registry.yarnpkg.com/extend/-/extend-3.0.2.tgz#f8b1136b4071fbd8eb140aff858b1019ec2915fa" + integrity sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g== + +extglob@^0.3.1: + version "0.3.2" + resolved "https://registry.yarnpkg.com/extglob/-/extglob-0.3.2.tgz#2e18ff3d2f49ab2765cec9023f011daa8d8349a1" + integrity sha1-Lhj/PS9JqydlzskCPwEdqo2DSaE= + dependencies: + is-extglob "^1.0.0" + +extract-files@9.0.0, extract-files@^9.0.0: + version "9.0.0" + resolved "https://registry.yarnpkg.com/extract-files/-/extract-files-9.0.0.tgz#8a7744f2437f81f5ed3250ed9f1550de902fe54a" + integrity sha512-CvdFfHkC95B4bBBk36hcEmvdR2awOdhhVUYH6S/zrVj3477zven/fJMYg7121h4T1xHZC+tetUpubpAhxwI7hQ== + +extsprintf@1.3.0: + version "1.3.0" + resolved "https://registry.yarnpkg.com/extsprintf/-/extsprintf-1.3.0.tgz#96918440e3041a7a414f8c52e3c574eb3c3e1e05" + integrity sha512-11Ndz7Nv+mvAC1j0ktTa7fAb0vLyGGX+rMHNBYQviQDGU0Hw7lhctJANqbPhu9nV9/izT/IntTgZ7Im/9LJs9g== + +extsprintf@^1.2.0: + version "1.4.1" + resolved "https://registry.yarnpkg.com/extsprintf/-/extsprintf-1.4.1.tgz#8d172c064867f235c0c84a596806d279bf4bcc07" + integrity sha512-Wrk35e8ydCKDj/ArClo1VrPVmN8zph5V4AtHwIuHhvMXsKf73UT3BOD+azBIW+3wOJ4FhEH7zyaJCFvChjYvMA== + +eyes@^0.1.8: + version "0.1.8" + resolved "https://registry.yarnpkg.com/eyes/-/eyes-0.1.8.tgz#62cf120234c683785d902348a800ef3e0cc20bc0" + integrity sha512-GipyPsXO1anza0AOZdy69Im7hGFCNB7Y/NGjDlZGJ3GJJLtwNSb2vrzYrTYJRrRloVx7pl+bhUaTB8yiccPvFQ== + 
+fake-merkle-patricia-tree@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/fake-merkle-patricia-tree/-/fake-merkle-patricia-tree-1.0.1.tgz#4b8c3acfb520afadf9860b1f14cd8ce3402cddd3" + integrity sha1-S4w6z7Ugr635hgsfFM2M40As3dM= + dependencies: + checkpoint-store "^1.1.0" + +faker@^5.3.1: + version "5.4.0" + resolved "https://registry.yarnpkg.com/faker/-/faker-5.4.0.tgz#f18e55993c6887918182b003d163df14daeb3011" + integrity sha512-Y9n/Ky/xZx/Bj8DePvXspUYRtHl/rGQytoIT5LaxmNwSe3wWyOeOXb3lT6Dpipq240PVpeFaGKzScz/5fvff2g== + +fast-check@^2.12.1: + version "2.13.0" + resolved "https://registry.yarnpkg.com/fast-check/-/fast-check-2.13.0.tgz#92a50a6a39b58760d4b0b52b12f98f28a9f020f6" + integrity sha512-IOfzKm/SCA+jpUEgAfqAuxHYPmgtmpnnwljQmYPRGrqYczcTKApXKHza/SNxFxYkecWfZilYa0DJdBvqz1bcSw== + dependencies: + pure-rand "^4.1.1" + +fast-deep-equal@^3.1.1: + version "3.1.3" + resolved "https://registry.yarnpkg.com/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz#3a7d56b559d6cbc3eb512325244e619a65c6c525" + integrity sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q== + +fast-future@~1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/fast-future/-/fast-future-1.0.2.tgz#8435a9aaa02d79248d17d704e76259301d99280a" + integrity sha1-hDWpqqAteSSNF9cE52JZMB2ZKAo= + +fast-glob@^3.1.1: + version "3.2.5" + resolved "https://registry.yarnpkg.com/fast-glob/-/fast-glob-3.2.5.tgz#7939af2a656de79a4f1901903ee8adcaa7cb9661" + integrity sha512-2DtFcgT68wiTTiwZ2hNdJfcHNke9XOfnwmBRWXhmeKM8rF0TGwmC/Qto3S7RoZKp5cilZbxzO5iTNTQsJ+EeDg== + dependencies: + "@nodelib/fs.stat" "^2.0.2" + "@nodelib/fs.walk" "^1.2.3" + glob-parent "^5.1.0" + merge2 "^1.3.0" + micromatch "^4.0.2" + picomatch "^2.2.1" + +fast-json-stable-stringify@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz#874bf69c6f404c2b5d99c481341399fd55892633" + integrity sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw== + +fast-levenshtein@~2.0.6: + version "2.0.6" + resolved "https://registry.yarnpkg.com/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz#3d8a5c66883a16a30ca8643e851f19baa7797917" + integrity sha1-PYpcZog6FqMMqGQ+hR8Zuqd5eRc= + +fast-safe-stringify@^2.0.6: + version "2.0.7" + resolved "https://registry.yarnpkg.com/fast-safe-stringify/-/fast-safe-stringify-2.0.7.tgz#124aa885899261f68aedb42a7c080de9da608743" + integrity sha512-Utm6CdzT+6xsDk2m8S6uL8VHxNwI6Jub+e9NYTcAms28T84pTa25GJQV9j0CY0N1rM8hK4x6grpF2BQf+2qwVA== + +fastq@^1.6.0: + version "1.11.0" + resolved "https://registry.yarnpkg.com/fastq/-/fastq-1.11.0.tgz#bb9fb955a07130a918eb63c1f5161cc32a5d0858" + integrity sha512-7Eczs8gIPDrVzT+EksYBcupqMyxSHXXrHOLRRxU2/DicV8789MRBRR8+Hc2uWzUupOs4YS4JzBmBxjjCVBxD/g== + dependencies: + reusify "^1.0.4" + +fb-watchman@^2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/fb-watchman/-/fb-watchman-2.0.1.tgz#fc84fb39d2709cf3ff6d743706157bb5708a8a85" + integrity sha512-DkPJKQeY6kKwmuMretBhr7G6Vodr7bFwDYTXIkfG1gjvNpaxBTQV3PbXg6bR1c1UP4jPOX0jHUbbHANL9vRjVg== + dependencies: + bser "2.1.1" + +fbjs-css-vars@^1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/fbjs-css-vars/-/fbjs-css-vars-1.0.2.tgz#216551136ae02fe255932c3ec8775f18e2c078b8" + integrity sha512-b2XGFAFdWZWg0phtAWLHCk836A1Xann+I+Dgd3Gk64MHKZO44FfoD1KxyvbSh0qZsIoXQGGlVztIY+oitJPpRQ== + +fbjs@^3.0.0: + version "3.0.0" + resolved 
"https://registry.yarnpkg.com/fbjs/-/fbjs-3.0.0.tgz#0907067fb3f57a78f45d95f1eacffcacd623c165" + integrity sha512-dJd4PiDOFuhe7vk4F80Mba83Vr2QuK86FoxtgPmzBqEJahncp+13YCmfoa53KHCo6OnlXLG7eeMWPfB5CrpVKg== + dependencies: + cross-fetch "^3.0.4" + fbjs-css-vars "^1.0.0" + loose-envify "^1.0.0" + object-assign "^4.1.0" + promise "^7.1.1" + setimmediate "^1.0.5" + ua-parser-js "^0.7.18" + +fetch-cookie@0.10.1: + version "0.10.1" + resolved "https://registry.yarnpkg.com/fetch-cookie/-/fetch-cookie-0.10.1.tgz#5ea88f3d36950543c87997c27ae2aeafb4b5c4d4" + integrity sha512-beB+VEd4cNeVG1PY+ee74+PkuCQnik78pgLi5Ah/7qdUfov8IctU0vLUbBT8/10Ma5GMBeI4wtxhGrEfKNYs2g== + dependencies: + tough-cookie "^2.3.3 || ^3.0.1 || ^4.0.0" + +fetch-cookie@0.7.0: + version "0.7.0" + resolved "https://registry.yarnpkg.com/fetch-cookie/-/fetch-cookie-0.7.0.tgz#a6fc137ad8363aa89125864c6451b86ecb7de802" + integrity sha512-Mm5pGlT3agW6t71xVM7vMZPIvI7T4FaTuFW4jari6dVzYHFDb3WZZsGpN22r/o3XMdkM0E7sPd1EGeyVbH2Tgg== + dependencies: + es6-denodeify "^0.1.1" + tough-cookie "^2.3.1" + +fetch-ponyfill@^4.0.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/fetch-ponyfill/-/fetch-ponyfill-4.1.0.tgz#ae3ce5f732c645eab87e4ae8793414709b239893" + integrity sha1-rjzl9zLGReq4fkroeTQUcJsjmJM= + dependencies: + node-fetch "~1.7.1" + +file-uri-to-path@1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/file-uri-to-path/-/file-uri-to-path-1.0.0.tgz#553a7b8446ff6f684359c445f1e37a05dacc33dd" + integrity sha512-0Zt+s3L7Vf1biwWZ29aARiVYLx7iMGnEUl9x33fbB/j3jR81u/O2LbqK+Bm1CDSNDKVtJ/YjwY7TUd5SkeLQLw== + +filename-regex@^2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/filename-regex/-/filename-regex-2.0.1.tgz#c1c4b9bee3e09725ddb106b75c1e301fe2f18b26" + integrity sha1-wcS5vuPglyXdsQa3XB4wH+LxiyY= + +fill-range@^2.1.0: + version "2.2.4" + resolved "https://registry.yarnpkg.com/fill-range/-/fill-range-2.2.4.tgz#eb1e773abb056dcd8df2bfdf6af59b8b3a936565" + integrity sha512-cnrcCbj01+j2gTG921VZPnHbjmdAf8oQV/iGeV2kZxGSyfYjjTyY79ErsK1WJWMpw6DaApEX72binqJE+/d+5Q== + dependencies: + is-number "^2.1.0" + isobject "^2.0.0" + randomatic "^3.0.0" + repeat-element "^1.1.2" + repeat-string "^1.5.2" + +fill-range@^7.0.1: + version "7.0.1" + resolved "https://registry.yarnpkg.com/fill-range/-/fill-range-7.0.1.tgz#1919a6a7c75fe38b2c7c77e5198535da9acdda40" + integrity sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ== + dependencies: + to-regex-range "^5.0.1" + +finalhandler@1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/finalhandler/-/finalhandler-1.2.0.tgz#7d23fe5731b207b4640e4fcd00aec1f9207a7b32" + integrity sha512-5uXcUVftlQMFnWC9qu/svkWv3GTd2PfUhK/3PLkYNAe7FbqJMt3515HaxE6eRL74GdsriiwujiawdaB1BpEISg== + dependencies: + debug "2.6.9" + encodeurl "~1.0.2" + escape-html "~1.0.3" + on-finished "2.4.1" + parseurl "~1.3.3" + statuses "2.0.1" + unpipe "~1.0.0" + +find-up@5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/find-up/-/find-up-5.0.0.tgz#4c92819ecb7083561e4f4a240a86be5198f536fc" + integrity sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng== + dependencies: + locate-path "^6.0.0" + path-exists "^4.0.0" + +find-up@^1.0.0: + version "1.1.2" + resolved "https://registry.yarnpkg.com/find-up/-/find-up-1.1.2.tgz#6b2e9822b1a2ce0a60ab64d610eccad53cb24d0f" + integrity sha1-ay6YIrGizgpgq2TWEOzK1TyyTQ8= + dependencies: + path-exists "^2.0.0" + pinkie-promise "^2.0.0" + +find-up@^2.1.0: + version "2.1.0" + resolved 
"https://registry.yarnpkg.com/find-up/-/find-up-2.1.0.tgz#45d1b7e506c717ddd482775a2b77920a3c0c57a7" + integrity sha1-RdG35QbHF93UgndaK3eSCjwMV6c= + dependencies: + locate-path "^2.0.0" + +find-up@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/find-up/-/find-up-3.0.0.tgz#49169f1d7993430646da61ecc5ae355c21c97b73" + integrity sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg== + dependencies: + locate-path "^3.0.0" + +find-up@^4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/find-up/-/find-up-4.1.0.tgz#97afe7d6cdc0bc5928584b7c8d7b16e8a9aa5d19" + integrity sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw== + dependencies: + locate-path "^5.0.0" + path-exists "^4.0.0" + +first-chunk-stream@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/first-chunk-stream/-/first-chunk-stream-1.0.0.tgz#59bfb50cd905f60d7c394cd3d9acaab4e6ad934e" + integrity sha1-Wb+1DNkF9g18OUzT2ayqtOatk04= + +flat@^4.1.0: + version "4.1.1" + resolved "https://registry.yarnpkg.com/flat/-/flat-4.1.1.tgz#a392059cc382881ff98642f5da4dde0a959f309b" + integrity sha512-FmTtBsHskrU6FJ2VxCnsDb84wu9zhmO3cUX2kGFb5tuwhfXxGciiT0oRY+cck35QmG+NmGh5eLz6lLCpWTqwpA== + dependencies: + is-buffer "~2.0.3" + +flatmap@0.0.3: + version "0.0.3" + resolved "https://registry.yarnpkg.com/flatmap/-/flatmap-0.0.3.tgz#1f18a4d938152d495965f9c958d923ab2dd669b4" + integrity sha512-OuR+o7kHVe+x9RtIujPay7Uw3bvDZBZFSBXClEphZuSDLmZTqMdclasf4vFSsogC8baDz0eaC2NdO/2dlXHBKQ== + +follow-redirects@^1.12.1: + version "1.14.8" + resolved "https://registry.yarnpkg.com/follow-redirects/-/follow-redirects-1.14.8.tgz#016996fb9a11a100566398b1c6839337d7bfa8fc" + integrity sha512-1x0S9UVJHsQprFcEC/qnNzBLcIxsjAV905f/UkQxbclCsoTWlacCNOpQa/anodLl2uaEKFhfWOvM2Qg77+15zA== + +follow-redirects@^1.14.0: + version "1.15.1" + resolved "https://registry.yarnpkg.com/follow-redirects/-/follow-redirects-1.15.1.tgz#0ca6a452306c9b276e4d3127483e29575e207ad5" + integrity sha512-yLAMQs+k0b2m7cVxpS1VKJVvoz7SS9Td1zss3XRwXj+ZDH00RJgnuLx7E44wx02kQLrdM3aOOy+FpzS7+8OizA== + +for-each@^0.3.3: + version "0.3.3" + resolved "https://registry.yarnpkg.com/for-each/-/for-each-0.3.3.tgz#69b447e88a0a5d32c3e7084f3f1710034b21376e" + integrity sha512-jqYfLp7mo9vIyQf8ykW2v7A+2N4QjeCeI5+Dz9XraiO1ign81wjiH7Fb9vSOWvQfNtmSa4H2RoQTrrXivdUZmw== + dependencies: + is-callable "^1.1.3" + +for-in@^1.0.1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/for-in/-/for-in-1.0.2.tgz#81068d295a8142ec0ac726c6e2200c30fb6d5e80" + integrity sha1-gQaNKVqBQuwKxybG4iAMMPttXoA= + +for-own@^0.1.4: + version "0.1.5" + resolved "https://registry.yarnpkg.com/for-own/-/for-own-0.1.5.tgz#5265c681a4f294dabbf17c9509b6763aa84510ce" + integrity sha1-UmXGgaTylNq78XyVCbZ2OqhFEM4= + dependencies: + for-in "^1.0.1" + +foreach@^2.0.4, foreach@^2.0.5: + version "2.0.5" + resolved "https://registry.yarnpkg.com/foreach/-/foreach-2.0.5.tgz#0bee005018aeb260d0a3af3ae658dd0136ec1b99" + integrity sha1-C+4AUBiusmDQo6865ljdATbsG5k= + +forever-agent@~0.6.1: + version "0.6.1" + resolved "https://registry.yarnpkg.com/forever-agent/-/forever-agent-0.6.1.tgz#fbc71f0c41adeb37f96c577ad1ed42d8fdacca91" + integrity sha512-j0KLYPhm6zeac4lz3oJ3o65qvgQCcPubiyotZrXqEaG4hNagNYO8qdlUrX5vwqv9ohqeT/Z3j6+yW067yWWdUw== + +form-data@3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/form-data/-/form-data-3.0.0.tgz#31b7e39c85f1355b7139ee0c647cf0de7f83c682" + integrity 
sha512-CKMFDglpbMi6PyN+brwB9Q/GOw0eAnsrEZDgcsH5Krhz5Od/haKHAX0NmQfha2zPPz0JpWzA7GJHGSnvCRLWsg== + dependencies: + asynckit "^0.4.0" + combined-stream "^1.0.8" + mime-types "^2.1.12" + +form-data@4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/form-data/-/form-data-4.0.0.tgz#93919daeaf361ee529584b9b31664dc12c9fa452" + integrity sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww== + dependencies: + asynckit "^0.4.0" + combined-stream "^1.0.8" + mime-types "^2.1.12" + +form-data@^2.2.0: + version "2.5.1" + resolved "https://registry.yarnpkg.com/form-data/-/form-data-2.5.1.tgz#f2cbec57b5e59e23716e128fe44d4e5dd23895f4" + integrity sha512-m21N3WOmEEURgk6B9GLOE4RuWOFf28Lhh9qGYeNlGq4VDXUlJy2th2slBNU8Gp8EzloYZOibZJ7t5ecIrFSjVA== + dependencies: + asynckit "^0.4.0" + combined-stream "^1.0.6" + mime-types "^2.1.12" + +form-data@~2.3.2: + version "2.3.3" + resolved "https://registry.yarnpkg.com/form-data/-/form-data-2.3.3.tgz#dcce52c05f644f298c6a7ab936bd724ceffbf3a6" + integrity sha512-1lLKB2Mu3aGP1Q/2eCOx0fNbRMe7XdwktwOruhfqqd0rIJWwN4Dh+E3hrPSlDCXnSR7UtZ1N38rVXm+6+MEhJQ== + dependencies: + asynckit "^0.4.0" + combined-stream "^1.0.6" + mime-types "^2.1.12" + +forwarded@0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/forwarded/-/forwarded-0.2.0.tgz#2269936428aad4c15c7ebe9779a84bf0b2a81811" + integrity sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow== + +fresh@0.5.2: + version "0.5.2" + resolved "https://registry.yarnpkg.com/fresh/-/fresh-0.5.2.tgz#3d8cadd90d976569fa835ab1f8e4b23a105605a7" + integrity sha512-zJ2mQYM18rEFOudeV4GShTGIQ7RbzA7ozbU9I/XBpm7kqgMywgmylMwXHxZJmkVoYkna9d2pVXVXPdYTP9ej8Q== + +fs-capacitor@^2.0.4: + version "2.0.4" + resolved "https://registry.yarnpkg.com/fs-capacitor/-/fs-capacitor-2.0.4.tgz#5a22e72d40ae5078b4fe64fe4d08c0d3fc88ad3c" + integrity sha512-8S4f4WsCryNw2mJJchi46YgB6CR5Ze+4L1h8ewl9tEpL4SJ3ZO+c/bS4BWhB8bK+O3TMqhuZarTitd0S0eh2pA== + +fs-capacitor@^6.1.0: + version "6.2.0" + resolved "https://registry.yarnpkg.com/fs-capacitor/-/fs-capacitor-6.2.0.tgz#fa79ac6576629163cb84561995602d8999afb7f5" + integrity sha512-nKcE1UduoSKX27NSZlg879LdQc94OtbOsEmKMN2MBNudXREvijRKx2GEBsTMTfws+BrbkJoEuynbGSVRSpauvw== + +fs-constants@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/fs-constants/-/fs-constants-1.0.0.tgz#6be0de9be998ce16af8afc24497b9ee9b7ccd9ad" + integrity sha512-y6OAwoSIf7FyjMIv94u+b5rdheZEjzR63GTyZJm5qh4Bi+2YgwLCcI/fPFZkL5PSixOt6ZNKm+w+Hfp/Bciwow== + +fs-extra@5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-5.0.0.tgz#414d0110cdd06705734d055652c5411260c31abd" + integrity sha512-66Pm4RYbjzdyeuqudYqhFiNBbCIuI9kgRqLPSHIlXHidW8NIQtVdkM1yeZ4lXwuhbTETv3EUGMNHAAw6hiundQ== + dependencies: + graceful-fs "^4.1.2" + jsonfile "^4.0.0" + universalify "^0.1.0" + +fs-extra@9.0.0: + version "9.0.0" + resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-9.0.0.tgz#b6afc31036e247b2466dc99c29ae797d5d4580a3" + integrity sha512-pmEYSk3vYsG/bF651KPUXZ+hvjpgWYw/Gc7W9NFUe3ZVLczKKWIij3IKpOrQcdw4TILtibFslZ0UmR8Vvzig4g== + dependencies: + at-least-node "^1.0.0" + graceful-fs "^4.2.0" + jsonfile "^6.0.1" + universalify "^1.0.0" + +fs-extra@^0.30.0: + version "0.30.0" + resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-0.30.0.tgz#f233ffcc08d4da7d432daa449776989db1df93f0" + integrity sha1-8jP/zAjU2n1DLapEl3aYnbHfk/A= + dependencies: + graceful-fs "^4.1.2" + jsonfile "^2.1.0" + klaw "^1.0.0" + path-is-absolute 
"^1.0.0" + rimraf "^2.2.8" + +fs-extra@^4.0.2: + version "4.0.3" + resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-4.0.3.tgz#0d852122e5bc5beb453fb028e9c0c9bf36340c94" + integrity sha512-q6rbdDd1o2mAnQreO7YADIxf/Whx4AHBiRf6d+/cVT8h44ss+lHgxf1FemcqDnQt9X3ct4McHr+JMGlYSsK7Cg== + dependencies: + graceful-fs "^4.1.2" + jsonfile "^4.0.0" + universalify "^0.1.0" + +fs-extra@^9.1.0: + version "9.1.0" + resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-9.1.0.tgz#5954460c764a8da2094ba3554bf839e6b9a7c86d" + integrity sha512-hcg3ZmepS30/7BSFqRvoo3DOMQu7IjqxO5nCDt+zM9XWjb33Wg7ziNT+Qvqbuc3+gWpzO02JubVyk2G4Zvo1OQ== + dependencies: + at-least-node "^1.0.0" + graceful-fs "^4.2.0" + jsonfile "^6.0.1" + universalify "^2.0.0" + +fs-jetpack@^2.2.2: + version "2.4.0" + resolved "https://registry.yarnpkg.com/fs-jetpack/-/fs-jetpack-2.4.0.tgz#6080c4ab464a019d37a404baeb47f32af8835026" + integrity sha512-S/o9Dd7K9A7gicVU32eT8G0kHcmSu0rCVdP79P0MWInKFb8XpTc8Syhoo66k9no+HDshtlh4pUJTws8X+8fdFQ== + dependencies: + minimatch "^3.0.2" + rimraf "^2.6.3" + +fs-minipass@^1.2.7: + version "1.2.7" + resolved "https://registry.yarnpkg.com/fs-minipass/-/fs-minipass-1.2.7.tgz#ccff8570841e7fe4265693da88936c55aed7f7c7" + integrity sha512-GWSSJGFy4e9GUeCcbIkED+bgAoFyj7XF1mV8rma3QW4NIqX9Kyx79N/PF61H5udOV3aY1IaMLs6pGbH71nlCTA== + dependencies: + minipass "^2.6.0" + +fs-minipass@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/fs-minipass/-/fs-minipass-2.1.0.tgz#7f5036fdbf12c63c169190cbe4199c852271f9fb" + integrity sha512-V/JgOLFCS+R6Vcq0slCuaeWEdNC3ouDlJMNIsacH2VtALiu9mV4LPrHc5cDl8k5aw6J8jwgWWpiTo5RYhmIzvg== + dependencies: + minipass "^3.0.0" + +fs.realpath@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/fs.realpath/-/fs.realpath-1.0.0.tgz#1504ad2523158caa40db4a2787cb01411994ea4f" + integrity sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw== + +fsevents@~2.1.2: + version "2.1.3" + resolved "https://registry.yarnpkg.com/fsevents/-/fsevents-2.1.3.tgz#fb738703ae8d2f9fe900c33836ddebee8b97f23e" + integrity sha512-Auw9a4AxqWpa9GUfj370BMPzzyncfBABW8Mab7BGWBYDj4Isgq+cDKtx0i6u9jcX9pQDnswsaaOTgTmA5pEjuQ== + +fsevents@~2.3.1: + version "2.3.2" + resolved "https://registry.yarnpkg.com/fsevents/-/fsevents-2.3.2.tgz#8a526f78b8fdf4623b709e0b975c52c24c02fd1a" + integrity sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA== + +function-bind@^1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/function-bind/-/function-bind-1.1.1.tgz#a56899d3ea3c9bab874bb9773b7c5ede92f4895d" + integrity sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A== + +functional-red-black-tree@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/functional-red-black-tree/-/functional-red-black-tree-1.0.1.tgz#1b0ab3bd553b2a0d6399d29c0e3ea0b252078327" + integrity sha1-GwqzvVU7Kg1jmdKcDj6gslIHgyc= + +gauge@~2.7.3: + version "2.7.4" + resolved "https://registry.yarnpkg.com/gauge/-/gauge-2.7.4.tgz#2c03405c7538c39d7eb37b317022e325fb018bf7" + integrity sha1-LANAXHU4w51+s3sxcCLjJfsBi/c= + dependencies: + aproba "^1.0.3" + console-control-strings "^1.0.0" + has-unicode "^2.0.0" + object-assign "^4.1.0" + signal-exit "^3.0.0" + string-width "^1.0.1" + strip-ansi "^3.0.1" + wide-align "^1.1.0" + +gensync@^1.0.0-beta.2: + version "1.0.0-beta.2" + resolved "https://registry.yarnpkg.com/gensync/-/gensync-1.0.0-beta.2.tgz#32a6ee76c3d7f52d46b2b1ae5d93fea8580a25e0" + integrity 
sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg== + +get-caller-file@^2.0.1: + version "2.0.5" + resolved "https://registry.yarnpkg.com/get-caller-file/-/get-caller-file-2.0.5.tgz#4f94412a82db32f36e3b0b9741f8a97feb031f7e" + integrity sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg== + +get-intrinsic@^1.0.2: + version "1.1.3" + resolved "https://registry.yarnpkg.com/get-intrinsic/-/get-intrinsic-1.1.3.tgz#063c84329ad93e83893c7f4f243ef63ffa351385" + integrity sha512-QJVz1Tj7MS099PevUG5jvnt9tSkXN8K14dxQlikJuPt4uD9hHAHjLyLBiLR5zELelBdD9QNRAXZzsJx0WaDL9A== + dependencies: + function-bind "^1.1.1" + has "^1.0.3" + has-symbols "^1.0.3" + +get-intrinsic@^1.1.0, get-intrinsic@^1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/get-intrinsic/-/get-intrinsic-1.1.1.tgz#15f59f376f855c446963948f0d24cd3637b4abc6" + integrity sha512-kWZrnVM42QCiEA2Ig1bG8zjoIMOgxWwYCEeNdwY6Tv/cOSeGpcoX4pXHfKUxNKVoArnrEr2e9srnAxxGIraS9Q== + dependencies: + function-bind "^1.1.1" + has "^1.0.3" + has-symbols "^1.0.1" + +get-params@^0.1.2: + version "0.1.2" + resolved "https://registry.yarnpkg.com/get-params/-/get-params-0.1.2.tgz#bae0dfaba588a0c60d7834c0d8dc2ff60eeef2fe" + integrity sha1-uuDfq6WIoMYNeDTA2Nwv9g7u8v4= + +get-port@^3.1.0: + version "3.2.0" + resolved "https://registry.yarnpkg.com/get-port/-/get-port-3.2.0.tgz#dd7ce7de187c06c8bf353796ac71e099f0980ebc" + integrity sha512-x5UJKlgeUiNT8nyo/AcnwLnZuZNcSjSw0kogRB+Whd1fjjFq4B1hySFxSFWWSn4mIBzg3sRNUDFYc4g5gjPoLg== + +get-stream@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/get-stream/-/get-stream-3.0.0.tgz#8e943d1358dc37555054ecbe2edb05aa174ede14" + integrity sha1-jpQ9E1jcN1VQVOy+LtsFqhdO3hQ= + +get-stream@^4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/get-stream/-/get-stream-4.1.0.tgz#c1b255575f3dc21d59bfc79cd3d2b46b1c3a54b5" + integrity sha512-GMat4EJ5161kIy2HevLlr4luNjBgvmj413KaQA7jt4V8B4RDsfpHk7WQ9GVqfYyyx8OS/L66Kox+rJRNklLK7w== + dependencies: + pump "^3.0.0" + +get-stream@^5.0.0, get-stream@^5.1.0: + version "5.2.0" + resolved "https://registry.yarnpkg.com/get-stream/-/get-stream-5.2.0.tgz#4966a1795ee5ace65e706c4b7beb71257d6e22d3" + integrity sha512-nBF+F1rAZVCu/p7rjzgA+Yb4lfYXrpl7a6VmJrU8wF9I1CKvP/QwPNZHnOlwbTkY6dvtFIzFMSyQXbLoTQPRpA== + dependencies: + pump "^3.0.0" + +get-symbol-description@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/get-symbol-description/-/get-symbol-description-1.0.0.tgz#7fdb81c900101fbd564dd5f1a30af5aadc1e58d6" + integrity sha512-2EmdH1YvIQiZpltCNgkuiUnyukzxM/R6NDJX31Ke3BG1Nq5b0S2PhX59UKi9vZpPDQVdqn+1IcaAwnzTT5vCjw== + dependencies: + call-bind "^1.0.2" + get-intrinsic "^1.1.1" + +getpass@^0.1.1: + version "0.1.7" + resolved "https://registry.yarnpkg.com/getpass/-/getpass-0.1.7.tgz#5eff8e3e684d569ae4cb2b1282604e8ba62149fa" + integrity sha512-0fzj9JxOLfJ+XGLhR8ze3unN0KZCgZwiSSDz168VERjK8Wl8kVSdcu2kspd4s4wtAa1y/qrVRiAA0WclVsu0ng== + dependencies: + assert-plus "^1.0.0" + +glob-base@^0.3.0: + version "0.3.0" + resolved "https://registry.yarnpkg.com/glob-base/-/glob-base-0.3.0.tgz#dbb164f6221b1c0b1ccf82aea328b497df0ea3c4" + integrity sha1-27Fk9iIbHAscz4Kuoyi0l98Oo8Q= + dependencies: + glob-parent "^2.0.0" + is-glob "^2.0.0" + +glob-parent@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/glob-parent/-/glob-parent-2.0.0.tgz#81383d72db054fcccf5336daa902f182f6edbb28" + integrity sha1-gTg9ctsFT8zPUzbaqQLxgvbtuyg= + dependencies: + is-glob "^2.0.0" + 
+glob-parent@^3.0.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/glob-parent/-/glob-parent-3.1.0.tgz#9e6af6299d8d3bd2bd40430832bd113df906c5ae" + integrity sha1-nmr2KZ2NO9K9QEMIMr0RPfkGxa4= + dependencies: + is-glob "^3.1.0" + path-dirname "^1.0.0" + +glob-parent@^5.1.0, glob-parent@~5.1.0: + version "5.1.2" + resolved "https://registry.yarnpkg.com/glob-parent/-/glob-parent-5.1.2.tgz#869832c58034fe68a4093c17dc15e8340d8401c4" + integrity sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow== + dependencies: + is-glob "^4.0.1" + +glob-stream@^5.3.2: + version "5.3.5" + resolved "https://registry.yarnpkg.com/glob-stream/-/glob-stream-5.3.5.tgz#a55665a9a8ccdc41915a87c701e32d4e016fad22" + integrity sha1-pVZlqajM3EGRWofHAeMtTgFvrSI= + dependencies: + extend "^3.0.0" + glob "^5.0.3" + glob-parent "^3.0.0" + micromatch "^2.3.7" + ordered-read-streams "^0.3.0" + through2 "^0.6.0" + to-absolute-glob "^0.1.1" + unique-stream "^2.0.2" + +glob@7.1.6, glob@^7.1.1: + version "7.1.6" + resolved "https://registry.yarnpkg.com/glob/-/glob-7.1.6.tgz#141f33b81a7c2492e125594307480c46679278a6" + integrity sha512-LwaxwyZ72Lk7vZINtNNrywX0ZuLyStrdDtabefZKAY5ZGJhVtgdznluResxNmPitE0SAO+O26sWTHeKSI2wMBA== + dependencies: + fs.realpath "^1.0.0" + inflight "^1.0.4" + inherits "2" + minimatch "^3.0.4" + once "^1.3.0" + path-is-absolute "^1.0.0" + +glob@^5.0.3: + version "5.0.15" + resolved "https://registry.yarnpkg.com/glob/-/glob-5.0.15.tgz#1bc936b9e02f4a603fcc222ecf7633d30b8b93b1" + integrity sha1-G8k2ueAvSmA/zCIuz3Yz0wuLk7E= + dependencies: + inflight "^1.0.4" + inherits "2" + minimatch "2 || 3" + once "^1.3.0" + path-is-absolute "^1.0.0" + +glob@^7.1.3: + version "7.2.3" + resolved "https://registry.yarnpkg.com/glob/-/glob-7.2.3.tgz#b8df0fb802bbfa8e89bd1d938b4e16578ed44f2b" + integrity sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q== + dependencies: + fs.realpath "^1.0.0" + inflight "^1.0.4" + inherits "2" + minimatch "^3.1.1" + once "^1.3.0" + path-is-absolute "^1.0.0" + +global@~4.4.0: + version "4.4.0" + resolved "https://registry.yarnpkg.com/global/-/global-4.4.0.tgz#3e7b105179006a323ed71aafca3e9c57a5cc6406" + integrity sha512-wv/LAoHdRE3BeTGz53FAamhGlPLhlssK45usmGFThIi4XqnBmjKQ16u+RNbP7WvigRZDxUsM0J3gcQ5yicaL0w== + dependencies: + min-document "^2.19.0" + process "^0.11.10" + +globals@^11.1.0: + version "11.12.0" + resolved "https://registry.yarnpkg.com/globals/-/globals-11.12.0.tgz#ab8795338868a0babd8525758018c2a7eb95c42e" + integrity sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA== + +globals@^9.18.0: + version "9.18.0" + resolved "https://registry.yarnpkg.com/globals/-/globals-9.18.0.tgz#aa3896b3e69b487f17e31ed2143d69a8e30c2d8a" + integrity sha512-S0nG3CLEQiY/ILxqtztTWH/3iRRdyBLw6KMDxnKMchrtbj2OFmehVh0WUCfW3DUrIgx/qFrJPICrq4Z4sTR9UQ== + +globby@11.0.2: + version "11.0.2" + resolved "https://registry.yarnpkg.com/globby/-/globby-11.0.2.tgz#1af538b766a3b540ebfb58a32b2e2d5897321d83" + integrity sha512-2ZThXDvvV8fYFRVIxnrMQBipZQDr7MxKAmQK1vujaj9/7eF0efG7BPUKJ7jP7G5SLF37xKDXvO4S/KKLj/Z0og== + dependencies: + array-union "^2.1.0" + dir-glob "^3.0.1" + fast-glob "^3.1.1" + ignore "^5.1.4" + merge2 "^1.3.0" + slash "^3.0.0" + +gluegun@^4.6.1: + version "4.6.1" + resolved "https://registry.yarnpkg.com/gluegun/-/gluegun-4.6.1.tgz#f2a65d20378873de87a2143b8c3939ffc9a9e2b6" + integrity 
sha512-Jd5hV1Uku2rjBg59mYA/bnwLwynK7u9A1zmK/LIb/p5d3pzjDCKRjWFuxZXyPwl9rsvKGhJUQxkFo2HEy8crKQ== + dependencies: + apisauce "^2.0.1" + app-module-path "^2.2.0" + cli-table3 "~0.5.0" + colors "^1.3.3" + cosmiconfig "6.0.0" + cross-spawn "^7.0.0" + ejs "^2.6.1" + enquirer "2.3.4" + execa "^3.0.0" + fs-jetpack "^2.2.2" + lodash.camelcase "^4.3.0" + lodash.kebabcase "^4.1.1" + lodash.lowercase "^4.3.0" + lodash.lowerfirst "^4.3.1" + lodash.pad "^4.5.1" + lodash.padend "^4.6.1" + lodash.padstart "^4.6.1" + lodash.repeat "^4.1.0" + lodash.snakecase "^4.1.1" + lodash.startcase "^4.4.0" + lodash.trim "^4.5.1" + lodash.trimend "^4.5.1" + lodash.trimstart "^4.5.1" + lodash.uppercase "^4.3.0" + lodash.upperfirst "^4.3.1" + ora "^4.0.0" + pluralize "^8.0.0" + ramdasauce "^2.1.0" + semver "^7.0.0" + which "^2.0.0" + yargs-parser "^16.1.0" + +"gluegun@https://github.com/edgeandnode/gluegun#v4.3.1-pin-colors-dep": + version "4.3.1" + resolved "https://github.com/edgeandnode/gluegun#b34b9003d7bf556836da41b57ef36eb21570620a" + dependencies: + apisauce "^1.0.1" + app-module-path "^2.2.0" + cli-table3 "~0.5.0" + colors "1.3.3" + cosmiconfig "6.0.0" + cross-spawn "^7.0.0" + ejs "^2.6.1" + enquirer "2.3.4" + execa "^3.0.0" + fs-jetpack "^2.2.2" + lodash.camelcase "^4.3.0" + lodash.kebabcase "^4.1.1" + lodash.lowercase "^4.3.0" + lodash.lowerfirst "^4.3.1" + lodash.pad "^4.5.1" + lodash.padend "^4.6.1" + lodash.padstart "^4.6.1" + lodash.repeat "^4.1.0" + lodash.snakecase "^4.1.1" + lodash.startcase "^4.4.0" + lodash.trim "^4.5.1" + lodash.trimend "^4.5.1" + lodash.trimstart "^4.5.1" + lodash.uppercase "^4.3.0" + lodash.upperfirst "^4.3.1" + ora "^4.0.0" + pluralize "^8.0.0" + ramdasauce "^2.1.0" + semver "^7.0.0" + which "^2.0.0" + yargs-parser "^16.1.0" + +got@9.6.0: + version "9.6.0" + resolved "https://registry.yarnpkg.com/got/-/got-9.6.0.tgz#edf45e7d67f99545705de1f7bbeeeb121765ed85" + integrity sha512-R7eWptXuGYxwijs0eV+v3o6+XH1IqVK8dJOEecQfTmkncw9AV4dcw/Dhxi8MdlqPthxxpZyizMzyg8RTmEsG+Q== + dependencies: + "@sindresorhus/is" "^0.14.0" + "@szmarczak/http-timer" "^1.1.2" + cacheable-request "^6.0.0" + decompress-response "^3.3.0" + duplexer3 "^0.1.4" + get-stream "^4.1.0" + lowercase-keys "^1.0.1" + mimic-response "^1.0.1" + p-cancelable "^1.0.0" + to-readable-stream "^1.0.0" + url-parse-lax "^3.0.0" + +got@^7.1.0: + version "7.1.0" + resolved "https://registry.yarnpkg.com/got/-/got-7.1.0.tgz#05450fd84094e6bbea56f451a43a9c289166385a" + integrity sha512-Y5WMo7xKKq1muPsxD+KmrR8DH5auG7fBdDVueZwETwV6VytKyU9OX/ddpq2/1hp1vIPvVb4T81dKQz3BivkNLw== + dependencies: + decompress-response "^3.2.0" + duplexer3 "^0.1.4" + get-stream "^3.0.0" + is-plain-obj "^1.1.0" + is-retry-allowed "^1.0.0" + is-stream "^1.0.0" + isurl "^1.0.0-alpha5" + lowercase-keys "^1.0.0" + p-cancelable "^0.3.0" + p-timeout "^1.1.1" + safe-buffer "^5.0.1" + timed-out "^4.0.0" + url-parse-lax "^1.0.0" + url-to-options "^1.0.1" + +graceful-fs@4.X, graceful-fs@^4.0.0, graceful-fs@^4.1.11, graceful-fs@^4.1.2, graceful-fs@^4.1.9: + version "4.2.6" + resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-4.2.6.tgz#ff040b2b0853b23c3d31027523706f1885d76bee" + integrity sha512-nTnJ528pbqxYanhpDYsi4Rd8MAeaBA67+RZ10CM1m3bTAVFEDcd5AuA4a6W5YkGZ1iNXHzZz8T6TBKLeBuNriQ== + +graceful-fs@^4.1.6, graceful-fs@^4.2.0: + version "4.2.10" + resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-4.2.10.tgz#147d3a006da4ca3ce14728c7aefc287c367d7a6c" + integrity sha512-9ByhssR2fPVsNZj478qUUbKfmL0+t5BDVyjShtyZZLiK7ZDAArFFfopyOTj0M05wE2tJPisA4iTnnXl2YoPvOA== 
+ +graphql-extensions@^0.15.0: + version "0.15.0" + resolved "https://registry.yarnpkg.com/graphql-extensions/-/graphql-extensions-0.15.0.tgz#3f291f9274876b0c289fa4061909a12678bd9817" + integrity sha512-bVddVO8YFJPwuACn+3pgmrEg6I8iBuYLuwvxiE+lcQQ7POotVZxm2rgGw0PvVYmWWf3DT7nTVDZ5ROh/ALp8mA== + dependencies: + "@apollographql/apollo-tools" "^0.5.0" + apollo-server-env "^3.1.0" + apollo-server-types "^0.9.0" + +graphql-subscriptions@^1.0.0: + version "1.2.1" + resolved "https://registry.yarnpkg.com/graphql-subscriptions/-/graphql-subscriptions-1.2.1.tgz#2142b2d729661ddf967b7388f7cf1dd4cf2e061d" + integrity sha512-95yD/tKi24q8xYa7Q9rhQN16AYj5wPbrb8tmHGM3WRc9EBmWrG/0kkMl+tQG8wcEuE9ibR4zyOM31p5Sdr2v4g== + dependencies: + iterall "^1.3.0" + +graphql-tag@^2.11.0: + version "2.12.6" + resolved "https://registry.yarnpkg.com/graphql-tag/-/graphql-tag-2.12.6.tgz#d441a569c1d2537ef10ca3d1633b48725329b5f1" + integrity sha512-FdSNcu2QQcWnM2VNvSCCDCVS5PpPqpzgFT8+GXzqJuoDd0CBncxCY278u4mhRO7tMgo2JjgJA5aZ+nWSQ/Z+xg== + dependencies: + tslib "^2.1.0" + +graphql-tag@^2.12.0: + version "2.12.1" + resolved "https://registry.yarnpkg.com/graphql-tag/-/graphql-tag-2.12.1.tgz#b065ef885e4800e4afd0842811b718a205f4aa58" + integrity sha512-LPewEE1vzGkHnCO8zdOGogKsHHBdtpGyihow1UuMwp6RnZa0lAS7NcbvltLOuo4pi5diQCPASAXZkQq44ffixA== + dependencies: + tslib "^1.14.1" + +graphql-tools@^4.0.8: + version "4.0.8" + resolved "https://registry.yarnpkg.com/graphql-tools/-/graphql-tools-4.0.8.tgz#e7fb9f0d43408fb0878ba66b522ce871bafe9d30" + integrity sha512-MW+ioleBrwhRjalKjYaLQbr+920pHBgy9vM/n47sswtns8+96sRn5M/G+J1eu7IMeKWiN/9p6tmwCHU7552VJg== + dependencies: + apollo-link "^1.2.14" + apollo-utilities "^1.0.1" + deprecated-decorator "^0.1.6" + iterall "^1.1.3" + uuid "^3.1.0" + +graphql-tools@^6.2.4: + version "6.2.6" + resolved "https://registry.yarnpkg.com/graphql-tools/-/graphql-tools-6.2.6.tgz#557c6d32797a02988f214bd596dec2abd12425dd" + integrity sha512-OyhSvK5ALVVD6bFiWjAqv2+lRyvjIRfb6Br5Tkjrv++rxnXDodPH/zhMbDGRw+W3SD5ioGEEz84yO48iPiN7jA== + dependencies: + "@graphql-tools/batch-delegate" "^6.2.6" + "@graphql-tools/code-file-loader" "^6.2.4" + "@graphql-tools/delegate" "^6.2.4" + "@graphql-tools/git-loader" "^6.2.4" + "@graphql-tools/github-loader" "^6.2.4" + "@graphql-tools/graphql-file-loader" "^6.2.4" + "@graphql-tools/graphql-tag-pluck" "^6.2.4" + "@graphql-tools/import" "^6.2.4" + "@graphql-tools/json-file-loader" "^6.2.4" + "@graphql-tools/links" "^6.2.4" + "@graphql-tools/load" "^6.2.4" + "@graphql-tools/load-files" "^6.2.4" + "@graphql-tools/merge" "^6.2.4" + "@graphql-tools/mock" "^6.2.4" + "@graphql-tools/module-loader" "^6.2.4" + "@graphql-tools/relay-operation-optimizer" "^6.2.4" + "@graphql-tools/resolvers-composition" "^6.2.4" + "@graphql-tools/schema" "^6.2.4" + "@graphql-tools/stitch" "^6.2.4" + "@graphql-tools/url-loader" "^6.2.4" + "@graphql-tools/utils" "^6.2.4" + "@graphql-tools/wrap" "^6.2.4" + tslib "~2.0.1" + +graphql-upload@^11.0.0: + version "11.0.0" + resolved "https://registry.yarnpkg.com/graphql-upload/-/graphql-upload-11.0.0.tgz#24b245ff18f353bab6715e8a055db9fd73035e10" + integrity sha512-zsrDtu5gCbQFDWsNa5bMB4nf1LpKX9KDgh+f8oL1288ijV4RxeckhVozAjqjXAfRpxOHD1xOESsh6zq8SjdgjA== + dependencies: + busboy "^0.3.1" + fs-capacitor "^6.1.0" + http-errors "^1.7.3" + isobject "^4.0.0" + object-path "^0.11.4" + +graphql-ws@4.1.5: + version "4.1.5" + resolved "https://registry.yarnpkg.com/graphql-ws/-/graphql-ws-4.1.5.tgz#03526b29acb54a424a9fbe300a4bd69ff65a50b3" + integrity 
sha512-yUQ1AjegD1Y9jDS699kyw7Mw+9H+rILm2HoS8N5a5B5YTH93xy3yifFhAJpKGc2wb/8yGdlVy8gTcud0TPqi6Q== + +graphql@15.5.0, graphql@^15.3.0: + version "15.5.0" + resolved "https://registry.yarnpkg.com/graphql/-/graphql-15.5.0.tgz#39d19494dbe69d1ea719915b578bf920344a69d5" + integrity sha512-OmaM7y0kaK31NKG31q4YbD2beNYa6jBBKtMFT6gLYJljHLJr42IqJ8KX08u3Li/0ifzTU5HjmoOOrwa5BRLeDA== + +growl@1.10.5: + version "1.10.5" + resolved "https://registry.yarnpkg.com/growl/-/growl-1.10.5.tgz#f2735dc2283674fa67478b10181059355c369e5e" + integrity sha512-qBr4OuELkhPenW6goKVXiv47US3clb3/IbuWF9KNKEijAy9oeHxU9IgzjvJhHkUzhaj7rOUD7+YGWqUjLp5oSA== + +gulp-sourcemaps@^1.5.2: + version "1.12.1" + resolved "https://registry.yarnpkg.com/gulp-sourcemaps/-/gulp-sourcemaps-1.12.1.tgz#b437d1f3d980cf26e81184823718ce15ae6597b6" + integrity sha1-tDfR89mAzyboEYSCNxjOFa5ll7Y= + dependencies: + "@gulp-sourcemaps/map-sources" "1.X" + acorn "4.X" + convert-source-map "1.X" + css "2.X" + debug-fabulous "0.0.X" + detect-newline "2.X" + graceful-fs "4.X" + source-map "~0.6.0" + strip-bom "2.X" + through2 "2.X" + vinyl "1.X" + +har-schema@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/har-schema/-/har-schema-2.0.0.tgz#a94c2224ebcac04782a0d9035521f24735b7ec92" + integrity sha512-Oqluz6zhGX8cyRaTQlFMPw80bSJVG2x/cFb8ZPhUILGgHka9SsokCCOQgpveePerqidZOrT14ipqfJb7ILcW5Q== + +har-validator@~5.1.3: + version "5.1.5" + resolved "https://registry.yarnpkg.com/har-validator/-/har-validator-5.1.5.tgz#1f0803b9f8cb20c0fa13822df1ecddb36bde1efd" + integrity sha512-nmT2T0lljbxdQZfspsno9hgrG3Uir6Ks5afism62poxqBM6sDnMEuPmzTq8XN0OEwqKLLdh1jQI3qyE66Nzb3w== + dependencies: + ajv "^6.12.3" + har-schema "^2.0.0" + +has-ansi@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/has-ansi/-/has-ansi-2.0.0.tgz#34f5049ce1ecdf2b0649af3ef24e45ed35416d91" + integrity sha1-NPUEnOHs3ysGSa8+8k5F7TVBbZE= + dependencies: + ansi-regex "^2.0.0" + +has-bigints@^1.0.0, has-bigints@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/has-bigints/-/has-bigints-1.0.1.tgz#64fe6acb020673e3b78db035a5af69aa9d07b113" + integrity sha512-LSBS2LjbNBTf6287JEbEzvJgftkF5qFkmCo9hDRpAzKhUOlJ+hx8dd4USs00SgsUNwc4617J9ki5YtEClM2ffA== + +has-flag@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/has-flag/-/has-flag-3.0.0.tgz#b5d454dc2199ae225699f3467e5a07f3b955bafd" + integrity sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw== + +has-flag@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/has-flag/-/has-flag-4.0.0.tgz#944771fd9c81c81265c4d6941860da06bb59479b" + integrity sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ== + +has-symbol-support-x@^1.4.1: + version "1.4.2" + resolved "https://registry.yarnpkg.com/has-symbol-support-x/-/has-symbol-support-x-1.4.2.tgz#1409f98bc00247da45da67cee0a36f282ff26455" + integrity sha512-3ToOva++HaW+eCpgqZrCfN51IPB+7bJNVT6CUATzueB5Heb8o6Nam0V3HG5dlDvZU1Gn5QLcbahiKw/XVk5JJw== + +has-symbols@^1.0.0, has-symbols@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/has-symbols/-/has-symbols-1.0.2.tgz#165d3070c00309752a1236a479331e3ac56f1423" + integrity sha512-chXa79rL/UC2KlX17jo3vRGz0azaWEx5tGqZg5pO3NUyEJVB17dMruQlzCCOfUvElghKcm5194+BCRvi2Rv/Gw== + +has-symbols@^1.0.1, has-symbols@^1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/has-symbols/-/has-symbols-1.0.3.tgz#bb7b2c4349251dce87b125f7bdf874aa7c8b39f8" + integrity 
sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A== + +has-to-string-tag-x@^1.2.0: + version "1.4.1" + resolved "https://registry.yarnpkg.com/has-to-string-tag-x/-/has-to-string-tag-x-1.4.1.tgz#a045ab383d7b4b2012a00148ab0aa5f290044d4d" + integrity sha512-vdbKfmw+3LoOYVr+mtxHaX5a96+0f3DljYd8JOqvOLsf5mw2Otda2qCDT9qRqLAhrjyQ0h7ual5nOiASpsGNFw== + dependencies: + has-symbol-support-x "^1.4.1" + +has-tostringtag@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/has-tostringtag/-/has-tostringtag-1.0.0.tgz#7e133818a7d394734f941e73c3d3f9291e658b25" + integrity sha512-kFjcSNhnlGV1kyoGk7OXKSawH5JOb/LzUc5w9B02hOTO0dfFRjbHQKvg1d6cf3HbeUmtU9VbbV3qzZ2Teh97WQ== + dependencies: + has-symbols "^1.0.2" + +has-unicode@^2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/has-unicode/-/has-unicode-2.0.1.tgz#e0e6fe6a28cf51138855e086d1691e771de2a8b9" + integrity sha1-4Ob+aijPUROIVeCG0Wkedx3iqLk= + +has@^1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/has/-/has-1.0.3.tgz#722d7cbfc1f6aa8241f16dd814e011e1f41e8796" + integrity sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw== + dependencies: + function-bind "^1.1.1" + +hash-base@^3.0.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/hash-base/-/hash-base-3.1.0.tgz#55c381d9e06e1d2997a883b4a3fddfe7f0d3af33" + integrity sha512-1nmYp/rhMDiE7AYkDw+lLwlAzz0AntGIe51F3RfFfEqyQ3feY2eI/NcwC6umIQVOASPMsWJLJScWKSSvzL9IVA== + dependencies: + inherits "^2.0.4" + readable-stream "^3.6.0" + safe-buffer "^5.2.0" + +hash.js@1.1.3: + version "1.1.3" + resolved "https://registry.yarnpkg.com/hash.js/-/hash.js-1.1.3.tgz#340dedbe6290187151c1ea1d777a3448935df846" + integrity sha512-/UETyP0W22QILqS+6HowevwhEFJ3MBJnwTf75Qob9Wz9t0DPuisL8kW8YZMK62dHAKE1c1p+gY1TtOLY+USEHA== + dependencies: + inherits "^2.0.3" + minimalistic-assert "^1.0.0" + +hash.js@1.1.7, hash.js@^1.0.0, hash.js@^1.0.3, hash.js@^1.1.7: + version "1.1.7" + resolved "https://registry.yarnpkg.com/hash.js/-/hash.js-1.1.7.tgz#0babca538e8d4ee4a0f8988d68866537a003cf42" + integrity sha512-taOaskGt4z4SOANNseOviYDvjEJinIkRgmp7LbKP2YTTmVxWBl87s/uzK9r+44BclBSp2X7K1hqeNfz9JbBeXA== + dependencies: + inherits "^2.0.3" + minimalistic-assert "^1.0.1" + +he@1.2.0, he@^1.1.1: + version "1.2.0" + resolved "https://registry.yarnpkg.com/he/-/he-1.2.0.tgz#84ae65fa7eafb165fddb61566ae14baf05664f0f" + integrity sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw== + +header-case@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/header-case/-/header-case-1.0.1.tgz#9535973197c144b09613cd65d317ef19963bd02d" + integrity sha1-lTWXMZfBRLCWE81l0xfvGZY70C0= + dependencies: + no-case "^2.2.0" + upper-case "^1.1.3" + +hi-base32@~0.5.0: + version "0.5.1" + resolved "https://registry.yarnpkg.com/hi-base32/-/hi-base32-0.5.1.tgz#1279f2ddae2673219ea5870c2121d2a33132857e" + integrity sha512-EmBBpvdYh/4XxsnUybsPag6VikPYnN30td+vQk+GI3qpahVEG9+gTkG0aXVxTjBqQ5T6ijbWIu77O+C5WFWsnA== + +highlight.js@^10.4.0, highlight.js@^10.4.1: + version "10.6.0" + resolved "https://registry.yarnpkg.com/highlight.js/-/highlight.js-10.6.0.tgz#0073aa71d566906965ba6e1b7be7b2682f5e18b6" + integrity sha512-8mlRcn5vk/r4+QcqerapwBYTe+iPL5ih6xrNylxrnBdHQiijDETfXX7VIxC3UiCRiINBJfANBAsPzAvRQj8RpQ== + +highlightjs-solidity@^1.0.21: + version "1.0.21" + resolved "https://registry.yarnpkg.com/highlightjs-solidity/-/highlightjs-solidity-1.0.21.tgz#6d257215b5b635231d4d0c523f2c419bbff6fe42" + 
integrity sha512-ozOtTD986CBIxuIuauzz2lqCOTpd27TbfYm+msMtNSB69mJ0cdFNvZ6rOO5iFtEHtDkVYVEFQywXffG2sX3XTw== + +hmac-drbg@^1.0.0, hmac-drbg@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/hmac-drbg/-/hmac-drbg-1.0.1.tgz#d2745701025a6c775a6c545793ed502fc0c649a1" + integrity sha1-0nRXAQJabHdabFRXk+1QL8DGSaE= + dependencies: + hash.js "^1.0.3" + minimalistic-assert "^1.0.0" + minimalistic-crypto-utils "^1.0.1" + +hoist-non-react-statics@^3.3.2: + version "3.3.2" + resolved "https://registry.yarnpkg.com/hoist-non-react-statics/-/hoist-non-react-statics-3.3.2.tgz#ece0acaf71d62c2969c2ec59feff42a4b1a85b45" + integrity sha512-/gGivxi8JPKWNm/W0jSmzcMPpfpPLc3dY/6GxhX2hQ9iGj3aDfklV4ET7NjKpSinLpJ5vafa9iiGIEZg10SfBw== + dependencies: + react-is "^16.7.0" + +home-or-tmp@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/home-or-tmp/-/home-or-tmp-2.0.0.tgz#e36c3f2d2cae7d746a857e38d18d5f32a7882db8" + integrity sha1-42w/LSyufXRqhX440Y1fMqeILbg= + dependencies: + os-homedir "^1.0.0" + os-tmpdir "^1.0.1" + +hosted-git-info@^2.1.4: + version "2.8.8" + resolved "https://registry.yarnpkg.com/hosted-git-info/-/hosted-git-info-2.8.8.tgz#7539bd4bc1e0e0a895815a2e0262420b12858488" + integrity sha512-f/wzC2QaWBs7t9IYqB4T3sR1xviIViXJRJTWBlx2Gf3g0Xi5vI7Yy4koXQ1c9OYDGHN9sBy1DQ2AB8fqZBWhUg== + +htmlparser2@^3.9.1: + version "3.10.1" + resolved "https://registry.yarnpkg.com/htmlparser2/-/htmlparser2-3.10.1.tgz#bd679dc3f59897b6a34bb10749c855bb53a9392f" + integrity sha512-IgieNijUMbkDovyoKObU1DUhm1iwNYE/fuifEoEHfd1oZKZDaONBSkal7Y01shxsM49R4XaMdGez3WnF9UfiCQ== + dependencies: + domelementtype "^1.3.1" + domhandler "^2.3.0" + domutils "^1.5.1" + entities "^1.1.1" + inherits "^2.0.1" + readable-stream "^3.1.1" + +htmlparser2@^6.0.0: + version "6.0.1" + resolved "https://registry.yarnpkg.com/htmlparser2/-/htmlparser2-6.0.1.tgz#422521231ef6d42e56bd411da8ba40aa36e91446" + integrity sha512-GDKPd+vk4jvSuvCbyuzx/unmXkk090Azec7LovXP8as1Hn8q9p3hbjmDGbUqqhknw0ajwit6LiiWqfiTUPMK7w== + dependencies: + domelementtype "^2.0.1" + domhandler "^4.0.0" + domutils "^2.4.4" + entities "^2.0.0" + +htmlparser2@~3.8.1: + version "3.8.3" + resolved "https://registry.yarnpkg.com/htmlparser2/-/htmlparser2-3.8.3.tgz#996c28b191516a8be86501a7d79757e5c70c1068" + integrity sha1-mWwosZFRaovoZQGn15dX5ccMEGg= + dependencies: + domelementtype "1" + domhandler "2.3" + domutils "1.5" + entities "1.0" + readable-stream "1.1" + +http-basic@^8.1.1: + version "8.1.3" + resolved "https://registry.yarnpkg.com/http-basic/-/http-basic-8.1.3.tgz#a7cabee7526869b9b710136970805b1004261bbf" + integrity sha512-/EcDMwJZh3mABI2NhGfHOGOeOZITqfkEO4p/xK+l3NpyncIHUQBoMvCSF/b5GqvKtySC2srL/GGG3+EtlqlmCw== + dependencies: + caseless "^0.12.0" + concat-stream "^1.6.2" + http-response-object "^3.0.1" + parse-cache-control "^1.0.1" + +http-cache-semantics@^4.0.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/http-cache-semantics/-/http-cache-semantics-4.1.0.tgz#49e91c5cbf36c9b94bcfcd71c23d5249ec74e390" + integrity sha512-carPklcUh7ROWRK7Cv27RPtdhYhUsela/ue5/jKzjegVvXDqM2ILE9Q2BGn9JZJh1g87cp56su/FgQSzcWS8cQ== + +http-errors@2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/http-errors/-/http-errors-2.0.0.tgz#b7774a1486ef73cf7667ac9ae0858c012c57b9d3" + integrity sha512-FtwrG/euBzaEjYeRqOgly7G0qviiXoJWnvEH2Z1plBdXgbyjv34pHTSb9zoeHMyDy33+DWy5Wt9Wo+TURtOYSQ== + dependencies: + depd "2.0.0" + inherits "2.0.4" + setprototypeof "1.2.0" + statuses "2.0.1" + toidentifier "1.0.1" + +http-errors@^1.7.3: + version "1.8.0" + resolved 
"https://registry.yarnpkg.com/http-errors/-/http-errors-1.8.0.tgz#75d1bbe497e1044f51e4ee9e704a62f28d336507" + integrity sha512-4I8r0C5JDhT5VkvI47QktDW75rNlGVsUf/8hzjCC/wkWI/jdTRmBb9aI7erSG82r1bjKY3F6k28WnsVxB1C73A== + dependencies: + depd "~1.1.2" + inherits "2.0.4" + setprototypeof "1.2.0" + statuses ">= 1.5.0 < 2" + toidentifier "1.0.0" + +http-https@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/http-https/-/http-https-1.0.0.tgz#2f908dd5f1db4068c058cd6e6d4ce392c913389b" + integrity sha1-L5CN1fHbQGjAWM1ubUzjkskTOJs= + +http-response-object@^3.0.1: + version "3.0.2" + resolved "https://registry.yarnpkg.com/http-response-object/-/http-response-object-3.0.2.tgz#7f435bb210454e4360d074ef1f989d5ea8aa9810" + integrity sha512-bqX0XTF6fnXSQcEJ2Iuyr75yVakyjIDCqroJQ/aHfSdlM743Cwqoi2nDYMzLGWUcuTWGWy8AAvOKXTfiv6q9RA== + dependencies: + "@types/node" "^10.0.3" + +http-signature@~1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/http-signature/-/http-signature-1.2.0.tgz#9aecd925114772f3d95b65a60abb8f7c18fbace1" + integrity sha512-CAbnr6Rz4CYQkLYUtSNXxQPUH2gK8f3iWexVlsnMeD+GjlsQ0Xsy1cOX+mN3dtxYomRy21CiOzU8Uhw6OwncEQ== + dependencies: + assert-plus "^1.0.0" + jsprim "^1.2.2" + sshpk "^1.7.0" + +human-signals@^1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/human-signals/-/human-signals-1.1.1.tgz#c5b1cd14f50aeae09ab6c59fe63ba3395fe4dfa3" + integrity sha512-SEQu7vl8KjNL2eoGBLF3+wAjpsNfA9XMlXAYj/3EdaNfAlxKthD1xjEQfGOUhllCGGJVNY34bRr6lPINhNjyZw== + +ice-cap@0.0.4: + version "0.0.4" + resolved "https://registry.yarnpkg.com/ice-cap/-/ice-cap-0.0.4.tgz#8a6d31ab4cac8d4b56de4fa946df3352561b6e18" + integrity sha1-im0xq0ysjUtW3k+pRt8zUlYbbhg= + dependencies: + cheerio "0.20.0" + color-logger "0.0.3" + +iconv-lite@0.4.24, iconv-lite@^0.4.4: + version "0.4.24" + resolved "https://registry.yarnpkg.com/iconv-lite/-/iconv-lite-0.4.24.tgz#2022b4b25fbddc21d2f524974a474aafe733908b" + integrity sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA== + dependencies: + safer-buffer ">= 2.1.2 < 3" + +iconv-lite@^0.6.2: + version "0.6.2" + resolved "https://registry.yarnpkg.com/iconv-lite/-/iconv-lite-0.6.2.tgz#ce13d1875b0c3a674bd6a04b7f76b01b1b6ded01" + integrity sha512-2y91h5OpQlolefMPmUlivelittSWy0rP+oYVpn6A7GwVHNE8AWzoYOBNmlwks3LobaJxgHCYZAnyNo2GgpNRNQ== + dependencies: + safer-buffer ">= 2.1.2 < 3.0.0" + +idna-uts46-hx@^2.3.1: + version "2.3.1" + resolved "https://registry.yarnpkg.com/idna-uts46-hx/-/idna-uts46-hx-2.3.1.tgz#a1dc5c4df37eee522bf66d969cc980e00e8711f9" + integrity sha512-PWoF9Keq6laYdIRwwCdhTPl60xRqAloYNMQLiyUnG42VjT53oW07BXIRM+NK7eQjzXjAk2gUvX9caRxlnF9TAA== + dependencies: + punycode "2.1.0" + +ieee754@^1.1.13, ieee754@^1.2.1: + version "1.2.1" + resolved "https://registry.yarnpkg.com/ieee754/-/ieee754-1.2.1.tgz#8eb7a10a63fff25d15a57b001586d177d1b0d352" + integrity sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA== + +ignore-walk@^3.0.1: + version "3.0.3" + resolved "https://registry.yarnpkg.com/ignore-walk/-/ignore-walk-3.0.3.tgz#017e2447184bfeade7c238e4aefdd1e8f95b1e37" + integrity sha512-m7o6xuOaT1aqheYHKf8W6J5pYH85ZI9w077erOzLje3JsB1gkafkAhHHY19dqjulgIZHFm32Cp5uNZgcQqdJKw== + dependencies: + minimatch "^3.0.4" + +ignore@^5.1.4: + version "5.1.8" + resolved "https://registry.yarnpkg.com/ignore/-/ignore-5.1.8.tgz#f150a8b50a34289b33e22f5889abd4d8016f0e57" + integrity sha512-BMpfD7PpiETpBl/A6S498BaIJ6Y/ABT93ETbby2fP00v4EbvPBXWEoaR1UBPKs3iR53pJY7EtZk5KACI57i1Uw== + 
+immediate@3.0.6: + version "3.0.6" + resolved "https://registry.yarnpkg.com/immediate/-/immediate-3.0.6.tgz#9db1dbd0faf8de6fbe0f5dd5e56bb606280de69b" + integrity sha1-nbHb0Pr43m++D13V5Wu2BigN5ps= + +immediate@3.3.0, immediate@^3.2.2, immediate@^3.2.3: + version "3.3.0" + resolved "https://registry.yarnpkg.com/immediate/-/immediate-3.3.0.tgz#1aef225517836bcdf7f2a2de2600c79ff0269266" + integrity sha512-HR7EVodfFUdQCTIeySw+WDRFJlPcLOJbXfwwZ7Oom6tjsvZ3bOkCDJHehQC3nxJrv7+f9XecwazynjU8e4Vw3Q== + +immediate@~3.2.3: + version "3.2.3" + resolved "https://registry.yarnpkg.com/immediate/-/immediate-3.2.3.tgz#d140fa8f614659bd6541233097ddaac25cdd991c" + integrity sha1-0UD6j2FGWb1lQSMwl92qwlzdmRw= + +immutable@3.8.2: + version "3.8.2" + resolved "https://registry.yarnpkg.com/immutable/-/immutable-3.8.2.tgz#c2439951455bb39913daf281376f1530e104adf3" + integrity sha1-wkOZUUVbs5kT2vKBN28VMOEErfM= + +immutable@~3.7.6: + version "3.7.6" + resolved "https://registry.yarnpkg.com/immutable/-/immutable-3.7.6.tgz#13b4d3cb12befa15482a26fe1b2ebae640071e4b" + integrity sha1-E7TTyxK++hVIKib+Gy665kAHHks= + +import-fresh@^3.1.0: + version "3.3.0" + resolved "https://registry.yarnpkg.com/import-fresh/-/import-fresh-3.3.0.tgz#37162c25fcb9ebaa2e6e53d5b4d88ce17d9e0c2b" + integrity sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw== + dependencies: + parent-module "^1.0.0" + resolve-from "^4.0.0" + +import-from@3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/import-from/-/import-from-3.0.0.tgz#055cfec38cd5a27d8057ca51376d7d3bf0891966" + integrity sha512-CiuXOFFSzkU5x/CR0+z7T91Iht4CXgfCxVOFRhh2Zyhg5wOpWvvDLQUsWl+gcN+QscYBjez8hDCt85O7RLDttQ== + dependencies: + resolve-from "^5.0.0" + +imurmurhash@^0.1.4: + version "0.1.4" + resolved "https://registry.yarnpkg.com/imurmurhash/-/imurmurhash-0.1.4.tgz#9218b9b2b928a238b13dc4fb6b6d576f231453ea" + integrity sha1-khi5srkoojixPcT7a21XbyMUU+o= + +inflight@^1.0.4: + version "1.0.6" + resolved "https://registry.yarnpkg.com/inflight/-/inflight-1.0.6.tgz#49bd6331d7d02d0c09bc910a1075ba8165b56df9" + integrity sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA== + dependencies: + once "^1.3.0" + wrappy "1" + +inherits@2, inherits@2.0.4, inherits@^2.0.1, inherits@^2.0.3, inherits@^2.0.4, inherits@~2.0.1, inherits@~2.0.3: + version "2.0.4" + resolved "https://registry.yarnpkg.com/inherits/-/inherits-2.0.4.tgz#0fa2c64f932917c3433a0ded55363aae37416b7c" + integrity sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ== + +inherits@2.0.3: + version "2.0.3" + resolved "https://registry.yarnpkg.com/inherits/-/inherits-2.0.3.tgz#633c2c83e3da42a502f52466022480f4208261de" + integrity sha1-Yzwsg+PaQqUC9SRmAiSA9CCCYd4= + +ini@~1.3.0: + version "1.3.8" + resolved "https://registry.yarnpkg.com/ini/-/ini-1.3.8.tgz#a29da425b48806f34767a4efce397269af28432c" + integrity sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew== + +internal-slot@^1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/internal-slot/-/internal-slot-1.0.3.tgz#7347e307deeea2faac2ac6205d4bc7d34967f59c" + integrity sha512-O0DB1JC/sPyZl7cIo78n5dR7eUSwwpYPiXRhTzNxZVAMUuB8vlnRFyLxdrVToks6XPLVnFfbzaVd5WLjhgg+vA== + dependencies: + get-intrinsic "^1.1.0" + has "^1.0.3" + side-channel "^1.0.4" + +invariant@^2.2.2: + version "2.2.4" + resolved "https://registry.yarnpkg.com/invariant/-/invariant-2.2.4.tgz#610f3c92c9359ce1db616e538008d23ff35158e6" + 
integrity sha512-phJfQVBuaJM5raOpJjSfkiD6BpbCE4Ns//LaXl6wGYtUBY83nWS6Rf9tXm2e8VaK60JEjYldbPif/A2B1C2gNA== + dependencies: + loose-envify "^1.0.0" + +invert-kv@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/invert-kv/-/invert-kv-1.0.0.tgz#104a8e4aaca6d3d8cd157a8ef8bfab2d7a3ffdb6" + integrity sha1-EEqOSqym09jNFXqO+L+rLXo//bY= + +ip-regex@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/ip-regex/-/ip-regex-2.1.0.tgz#fa78bf5d2e6913c911ce9f819ee5146bb6d844e9" + integrity sha512-58yWmlHpp7VYfcdTwMTvwMmqx/Elfxjd9RXTDyMsbL7lLWmhMylLEqiYVLKuLzOZqVgiWXD9MfR62Vv89VRxkw== + +ip-regex@^4.0.0: + version "4.3.0" + resolved "https://registry.yarnpkg.com/ip-regex/-/ip-regex-4.3.0.tgz#687275ab0f57fa76978ff8f4dddc8a23d5990db5" + integrity sha512-B9ZWJxHHOHUhUjCPrMpLD4xEq35bUTClHM1S6CBU5ixQnkZmwipwgc96vAd7AAGM9TGHvJR+Uss+/Ak6UphK+Q== + +ip@^1.1.5: + version "1.1.8" + resolved "https://registry.yarnpkg.com/ip/-/ip-1.1.8.tgz#ae05948f6b075435ed3307acce04629da8cdbf48" + integrity sha512-PuExPYUiu6qMBQb4l06ecm6T6ujzhmh+MeJcW9wa89PoAz5pvd4zPgN5WJV104mb6S2T1AwNIAaB70JNrLQWhg== + +ipaddr.js@1.9.1: + version "1.9.1" + resolved "https://registry.yarnpkg.com/ipaddr.js/-/ipaddr.js-1.9.1.tgz#bff38543eeb8984825079ff3a2a8e6cbd46781b3" + integrity sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g== + +ipfs-block@~0.8.1: + version "0.8.1" + resolved "https://registry.yarnpkg.com/ipfs-block/-/ipfs-block-0.8.1.tgz#05e1068832775e8f1c2da5b64106cc837fd2acb9" + integrity sha512-0FaCpmij+jZBoUYhjoB5ptjdl9QzvrdRIoBmUU5JiBnK2GA+4YM/ifklaB8ePRhA/rRzhd+KYBjvMFMAL4NrVQ== + dependencies: + cids "~0.7.0" + class-is "^1.1.0" + +ipfs-http-client@34.0.0: + version "34.0.0" + resolved "https://registry.yarnpkg.com/ipfs-http-client/-/ipfs-http-client-34.0.0.tgz#8804d06a11c22306332a8ffa0949b6f672a0c9c8" + integrity sha512-4RCkk8ix4Dqn6sxqFVwuXWCZ1eLFPsVaj6Ijvu1fs9VYgxgVudsW9PWwarlr4mw1xUCmPWYyXnEbGgzBrfMy0Q== + dependencies: + abort-controller "^3.0.0" + async "^2.6.1" + bignumber.js "^9.0.0" + bl "^3.0.0" + bs58 "^4.0.1" + buffer "^5.4.2" + cids "~0.7.1" + concat-stream "github:hugomrdias/concat-stream#feat/smaller" + debug "^4.1.0" + detect-node "^2.0.4" + end-of-stream "^1.4.1" + err-code "^2.0.0" + explain-error "^1.0.4" + flatmap "0.0.3" + glob "^7.1.3" + ipfs-block "~0.8.1" + ipfs-utils "~0.0.3" + ipld-dag-cbor "~0.15.0" + ipld-dag-pb "~0.17.3" + ipld-raw "^4.0.0" + is-ipfs "~0.6.1" + is-pull-stream "0.0.0" + is-stream "^2.0.0" + iso-stream-http "~0.1.2" + iso-url "~0.4.6" + iterable-ndjson "^1.1.0" + just-kebab-case "^1.1.0" + just-map-keys "^1.1.0" + kind-of "^6.0.2" + ky "^0.11.2" + ky-universal "^0.2.2" + lru-cache "^5.1.1" + multiaddr "^6.0.6" + multibase "~0.6.0" + multicodec "~0.5.1" + multihashes "~0.4.14" + ndjson "github:hugomrdias/ndjson#feat/readable-stream3" + once "^1.4.0" + peer-id "~0.12.3" + peer-info "~0.15.1" + promise-nodeify "^3.0.1" + promisify-es6 "^1.0.3" + pull-defer "~0.2.3" + pull-stream "^3.6.9" + pull-to-stream "~0.1.1" + pump "^3.0.0" + qs "^6.5.2" + readable-stream "^3.1.1" + stream-to-pull-stream "^1.7.2" + tar-stream "^2.0.1" + through2 "^3.0.1" + +ipfs-utils@~0.0.3: + version "0.0.4" + resolved "https://registry.yarnpkg.com/ipfs-utils/-/ipfs-utils-0.0.4.tgz#946114cfeb6afb4454b4ccb10d2327cd323b0cce" + integrity sha512-7cZf6aGj2FG3XJWhCNwn4mS93Q0GEWjtBZvEHqzgI43U2qzNDCyzfS1pei1Y5F+tw/zDJ5U4XG0G9reJxR53Ig== + dependencies: + buffer "^5.2.1" + is-buffer "^2.0.3" + is-electron "^2.2.0" + is-pull-stream "0.0.0" + is-stream "^2.0.0" 
+ kind-of "^6.0.2" + readable-stream "^3.4.0" + +ipld-dag-cbor@~0.15.0: + version "0.15.3" + resolved "https://registry.yarnpkg.com/ipld-dag-cbor/-/ipld-dag-cbor-0.15.3.tgz#283afdb81d5b07db8e4fff7a10ef5e517e87f299" + integrity sha512-m23nG7ZyoVFnkK55/bLAErc7EfiMgaEQlqHWDTGzPI+O5r6bPfp+qbL5zTVSIT8tpbHmu174dwerVtLoVgeVyA== + dependencies: + borc "^2.1.2" + buffer "^5.5.0" + cids "~0.8.0" + is-circular "^1.0.2" + multicodec "^1.0.0" + multihashing-async "~0.8.0" + +ipld-dag-pb@~0.17.3: + version "0.17.4" + resolved "https://registry.yarnpkg.com/ipld-dag-pb/-/ipld-dag-pb-0.17.4.tgz#080841cfdd014d996f8da7f3a522ec8b1f6b6494" + integrity sha512-YwCxETEMuXVspOKOhjIOHJvKvB/OZfCDkpSFiYBQN2/JQjM9y/RFCYzIQGm0wg7dCFLrhvfjAZLTSaKs65jzWA== + dependencies: + cids "~0.7.0" + class-is "^1.1.0" + multicodec "~0.5.1" + multihashing-async "~0.7.0" + protons "^1.0.1" + stable "~0.1.8" + +ipld-raw@^4.0.0: + version "4.0.1" + resolved "https://registry.yarnpkg.com/ipld-raw/-/ipld-raw-4.0.1.tgz#49a6f58cdfece5a4d581925b19ee19255be2a29d" + integrity sha512-WjIdtZ06jJEar8zh+BHB84tE6ZdbS/XNa7+XCArOYfmeJ/c01T9VQpeMwdJQYn5c3s5UvvCu7y4VIi3vk2g1bA== + dependencies: + cids "~0.7.0" + multicodec "^1.0.0" + multihashing-async "~0.8.0" + +is-arguments@^1.0.4, is-arguments@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/is-arguments/-/is-arguments-1.1.0.tgz#62353031dfbee07ceb34656a6bde59efecae8dd9" + integrity sha512-1Ij4lOMPl/xB5kBDn7I+b2ttPMKa8szhEIrXDuXQD/oe3HJLTLhqhgGspwgyGd6MOywBUqVvYicF72lkgDnIHg== + dependencies: + call-bind "^1.0.0" + +is-arrayish@^0.2.1: + version "0.2.1" + resolved "https://registry.yarnpkg.com/is-arrayish/-/is-arrayish-0.2.1.tgz#77c99840527aa8ecb1a8ba697b80645a7a926a9d" + integrity sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg== + +is-bigint@^1.0.1: + version "1.0.4" + resolved "https://registry.yarnpkg.com/is-bigint/-/is-bigint-1.0.4.tgz#08147a1875bc2b32005d41ccd8291dffc6691df3" + integrity sha512-zB9CruMamjym81i2JZ3UMn54PKGsQzsJeo6xvN3HJJ4CAsQNB6iRutp2To77OfCNuoxspsIhzaPoO1zyCEhFOg== + dependencies: + has-bigints "^1.0.1" + +is-binary-path@~2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/is-binary-path/-/is-binary-path-2.1.0.tgz#ea1f7f3b80f064236e83470f86c09c254fb45b09" + integrity sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw== + dependencies: + binary-extensions "^2.0.0" + +is-boolean-object@^1.1.0: + version "1.1.2" + resolved "https://registry.yarnpkg.com/is-boolean-object/-/is-boolean-object-1.1.2.tgz#5c6dc200246dd9321ae4b885a114bb1f75f63719" + integrity sha512-gDYaKHJmnj4aWxyj6YHyXVpdQawtVLHU5cb+eztPGczf6cjuTdwve5ZIEfgXqH4e57An1D1AKf8CZ3kYrQRqYA== + dependencies: + call-bind "^1.0.2" + has-tostringtag "^1.0.0" + +is-buffer@^1.1.5: + version "1.1.6" + resolved "https://registry.yarnpkg.com/is-buffer/-/is-buffer-1.1.6.tgz#efaa2ea9daa0d7ab2ea13a97b2b8ad51fefbe8be" + integrity sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w== + +is-buffer@^2.0.3, is-buffer@~2.0.3: + version "2.0.5" + resolved "https://registry.yarnpkg.com/is-buffer/-/is-buffer-2.0.5.tgz#ebc252e400d22ff8d77fa09888821a24a658c191" + integrity sha512-i2R6zNFDwgEHJyQUtJEk0XFi1i0dPFn/oqjK3/vPCcDeJvW5NQ83V8QbicfF1SupOaB0h8ntgBC2YiE7dfyctQ== + +is-callable@^1.1.3, is-callable@^1.1.4, is-callable@^1.2.4: + version "1.2.4" + resolved "https://registry.yarnpkg.com/is-callable/-/is-callable-1.2.4.tgz#47301d58dd0259407865547853df6d61fe471945" + integrity 
sha512-nsuwtxZfMX67Oryl9LCQ+upnC0Z0BgpwntpS89m1H/TLF0zNfzfLMV/9Wa/6MZsj0acpEjAO0KF1xT6ZdLl95w== + +is-callable@^1.2.3: + version "1.2.3" + resolved "https://registry.yarnpkg.com/is-callable/-/is-callable-1.2.3.tgz#8b1e0500b73a1d76c70487636f368e519de8db8e" + integrity sha512-J1DcMe8UYTBSrKezuIUTUwjXsho29693unXM2YhJUTR2txK/eG47bvNa/wipPFmZFgr/N6f1GA66dv0mEyTIyQ== + +is-circular@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/is-circular/-/is-circular-1.0.2.tgz#2e0ab4e9835f4c6b0ea2b9855a84acd501b8366c" + integrity sha512-YttjnrswnUYRVJvxCvu8z+PGMUSzC2JttP0OEXezlAEdp3EXzhf7IZ3j0gRAybJBQupedIZFhY61Tga6E0qASA== + +is-core-module@^2.2.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/is-core-module/-/is-core-module-2.2.0.tgz#97037ef3d52224d85163f5597b2b63d9afed981a" + integrity sha512-XRAfAdyyY5F5cOXn7hYQDqh2Xmii+DEfIcQGxK/uNwMHhIkPWO0g8msXcbzLe+MpGoR951MlqM/2iIlU4vKDdQ== + dependencies: + has "^1.0.3" + +is-date-object@^1.0.1: + version "1.0.5" + resolved "https://registry.yarnpkg.com/is-date-object/-/is-date-object-1.0.5.tgz#0841d5536e724c25597bf6ea62e1bd38298df31f" + integrity sha512-9YQaSxsAiSwcvS33MBk3wTCVnWK+HhF8VZR2jRxehM16QcVOdHqPn4VPHmRK4lSr38n9JriurInLcP90xsYNfQ== + dependencies: + has-tostringtag "^1.0.0" + +is-dotfile@^1.0.0: + version "1.0.3" + resolved "https://registry.yarnpkg.com/is-dotfile/-/is-dotfile-1.0.3.tgz#a6a2f32ffd2dfb04f5ca25ecd0f6b83cf798a1e1" + integrity sha1-pqLzL/0t+wT1yiXs0Pa4PPeYoeE= + +is-electron@^2.2.0: + version "2.2.1" + resolved "https://registry.yarnpkg.com/is-electron/-/is-electron-2.2.1.tgz#751b1dd8a74907422faa5c35aaa0cf66d98086e9" + integrity sha512-r8EEQQsqT+Gn0aXFx7lTFygYQhILLCB+wn0WCDL5LZRINeLH/Rvw1j2oKodELLXYNImQ3CRlVsY8wW4cGOsyuw== + +is-equal-shallow@^0.1.3: + version "0.1.3" + resolved "https://registry.yarnpkg.com/is-equal-shallow/-/is-equal-shallow-0.1.3.tgz#2238098fc221de0bcfa5d9eac4c45d638aa1c534" + integrity sha1-IjgJj8Ih3gvPpdnqxMRdY4qhxTQ= + dependencies: + is-primitive "^2.0.0" + +is-extendable@^0.1.0, is-extendable@^0.1.1: + version "0.1.1" + resolved "https://registry.yarnpkg.com/is-extendable/-/is-extendable-0.1.1.tgz#62b110e289a471418e3ec36a617d472e301dfc89" + integrity sha1-YrEQ4omkcUGOPsNqYX1HLjAd/Ik= + +is-extglob@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/is-extglob/-/is-extglob-1.0.0.tgz#ac468177c4943405a092fc8f29760c6ffc6206c0" + integrity sha1-rEaBd8SUNAWgkvyPKXYMb/xiBsA= + +is-extglob@^2.1.0, is-extglob@^2.1.1: + version "2.1.1" + resolved "https://registry.yarnpkg.com/is-extglob/-/is-extglob-2.1.1.tgz#a88c02535791f02ed37c76a1b9ea9773c833f8c2" + integrity sha1-qIwCU1eR8C7TfHahueqXc8gz+MI= + +is-finite@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/is-finite/-/is-finite-1.1.0.tgz#904135c77fb42c0641d6aa1bcdbc4daa8da082f3" + integrity sha512-cdyMtqX/BOqqNBBiKlIVkytNHm49MtMlYyn1zxzvJKWmFMlGzm+ry5BBfYyeY9YmNKbRSo/o7OX9w9ale0wg3w== + +is-fn@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/is-fn/-/is-fn-1.0.0.tgz#9543d5de7bcf5b08a22ec8a20bae6e286d510d8c" + integrity sha1-lUPV3nvPWwiiLsiiC65uKG1RDYw= + +is-fullwidth-code-point@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/is-fullwidth-code-point/-/is-fullwidth-code-point-1.0.0.tgz#ef9e31386f031a7f0d643af82fde50c457ef00cb" + integrity sha1-754xOG8DGn8NZDr4L95QxFfvAMs= + dependencies: + number-is-nan "^1.0.0" + +is-fullwidth-code-point@^2.0.0: + version "2.0.0" + resolved 
"https://registry.yarnpkg.com/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz#a3b30a5c4f199183167aaab93beefae3ddfb654f" + integrity sha512-VHskAKYM8RfSFXwee5t5cbN5PZeq1Wrh6qd5bkyiXIf6UQcN6w/A0eXM9r6t8d+GYOh+o6ZhiEnb88LN/Y8m2w== + +is-fullwidth-code-point@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz#f116f8064fe90b3f7844a38997c0b75051269f1d" + integrity sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg== + +is-function@^1.0.1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/is-function/-/is-function-1.0.2.tgz#4f097f30abf6efadac9833b17ca5dc03f8144e08" + integrity sha512-lw7DUp0aWXYg+CBCN+JKkcE0Q2RayZnSvnZBlwgxHBQhqt5pZNVy4Ri7H9GmmXkdu7LUthszM+Tor1u/2iBcpQ== + +is-generator-function@^1.0.7: + version "1.0.8" + resolved "https://registry.yarnpkg.com/is-generator-function/-/is-generator-function-1.0.8.tgz#dfb5c2b120e02b0a8d9d2c6806cd5621aa922f7b" + integrity sha512-2Omr/twNtufVZFr1GhxjOMFPAj2sjc/dKaIqBhvo4qciXfJmITGH6ZGd8eZYNHza8t1y0e01AuqRhJwfWp26WQ== + +is-glob@4.0.1: + version "4.0.1" + resolved "https://registry.yarnpkg.com/is-glob/-/is-glob-4.0.1.tgz#7567dbe9f2f5e2467bc77ab83c4a29482407a5dc" + integrity sha512-5G0tKtBTFImOqDnLB2hG6Bp2qcKEFduo4tZu9MT/H6NQv/ghhy30o55ufafxJ/LdH79LLs2Kfrn85TLKyA7BUg== + dependencies: + is-extglob "^2.1.1" + +is-glob@^2.0.0, is-glob@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/is-glob/-/is-glob-2.0.1.tgz#d096f926a3ded5600f3fdfd91198cb0888c2d863" + integrity sha1-0Jb5JqPe1WAPP9/ZEZjLCIjC2GM= + dependencies: + is-extglob "^1.0.0" + +is-glob@^3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/is-glob/-/is-glob-3.1.0.tgz#7ba5ae24217804ac70707b96922567486cc3e84a" + integrity sha1-e6WuJCF4BKxwcHuWkiVnSGzD6Eo= + dependencies: + is-extglob "^2.1.0" + +is-glob@^4.0.1, is-glob@~4.0.1: + version "4.0.3" + resolved "https://registry.yarnpkg.com/is-glob/-/is-glob-4.0.3.tgz#64f61e42cbbb2eec2071a9dac0b28ba1e65d5084" + integrity sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg== + dependencies: + is-extglob "^2.1.1" + +is-hex-prefixed@1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/is-hex-prefixed/-/is-hex-prefixed-1.0.0.tgz#7d8d37e6ad77e5d127148913c573e082d777f554" + integrity sha512-WvtOiug1VFrE9v1Cydwm+FnXd3+w9GaeVUss5W4v/SLy3UW00vP+6iNF2SdnfiBoLy4bTqVdkftNGTUeOFVsbA== + +is-interactive@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/is-interactive/-/is-interactive-1.0.0.tgz#cea6e6ae5c870a7b0a0004070b7b587e0252912e" + integrity sha512-2HvIEKRoqS62guEC+qBjpvRubdX910WCMuJTZ+I9yvqKU2/12eSL549HMwtabb4oupdj2sMP50k+XJfB/8JE6w== + +is-ip@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/is-ip/-/is-ip-2.0.0.tgz#68eea07e8a0a0a94c2d080dd674c731ab2a461ab" + integrity sha512-9MTn0dteHETtyUx8pxqMwg5hMBi3pvlyglJ+b79KOCca0po23337LbVV2Hl4xmMvfw++ljnO0/+5G6G+0Szh6g== + dependencies: + ip-regex "^2.0.0" + +is-ip@^3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/is-ip/-/is-ip-3.1.0.tgz#2ae5ddfafaf05cb8008a62093cf29734f657c5d8" + integrity sha512-35vd5necO7IitFPjd/YBeqwWnyDWbuLH9ZXQdMfDA8TEo7pv5X8yfrvVO3xbJbLUlERCMvf6X0hTUamQxCYJ9Q== + dependencies: + ip-regex "^4.0.0" + +is-ipfs@~0.6.1: + version "0.6.3" + resolved "https://registry.yarnpkg.com/is-ipfs/-/is-ipfs-0.6.3.tgz#82a5350e0a42d01441c40b369f8791e91404c497" + integrity 
sha512-HyRot1dvLcxImtDqPxAaY1miO6WsiP/z7Yxpg2qpaLWv5UdhAPtLvHJ4kMLM0w8GSl8AFsVF23PHe1LzuWrUlQ== + dependencies: + bs58 "^4.0.1" + cids "~0.7.0" + mafmt "^7.0.0" + multiaddr "^7.2.1" + multibase "~0.6.0" + multihashes "~0.4.13" + +is-lower-case@^1.1.0: + version "1.1.3" + resolved "https://registry.yarnpkg.com/is-lower-case/-/is-lower-case-1.1.3.tgz#7e147be4768dc466db3bfb21cc60b31e6ad69393" + integrity sha1-fhR75HaNxGbbO/shzGCzHmrWk5M= + dependencies: + lower-case "^1.1.0" + +is-map@^2.0.2: + version "2.0.2" + resolved "https://registry.yarnpkg.com/is-map/-/is-map-2.0.2.tgz#00922db8c9bf73e81b7a335827bc2a43f2b91127" + integrity sha512-cOZFQQozTha1f4MxLFzlgKYPTyj26picdZTx82hbc/Xf4K/tZOOXSCkMvU4pKioRXGDLJRn0GM7Upe7kR721yg== + +is-negative-zero@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/is-negative-zero/-/is-negative-zero-2.0.1.tgz#3de746c18dda2319241a53675908d8f766f11c24" + integrity sha512-2z6JzQvZRa9A2Y7xC6dQQm4FSTSTNWjKIYYTt4246eMTJmIo0Q+ZyOsU66X8lxK1AbB92dFeglPLrhwpeRKO6w== + +is-number-object@^1.0.4: + version "1.0.6" + resolved "https://registry.yarnpkg.com/is-number-object/-/is-number-object-1.0.6.tgz#6a7aaf838c7f0686a50b4553f7e54a96494e89f0" + integrity sha512-bEVOqiRcvo3zO1+G2lVMy+gkkEm9Yh7cDMRusKKu5ZJKPUYSJwICTKZrNKHA2EbSP0Tu0+6B/emsYNHZyn6K8g== + dependencies: + has-tostringtag "^1.0.0" + +is-number@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/is-number/-/is-number-2.1.0.tgz#01fcbbb393463a548f2f466cce16dece49db908f" + integrity sha1-Afy7s5NGOlSPL0ZszhbezknbkI8= + dependencies: + kind-of "^3.0.2" + +is-number@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/is-number/-/is-number-4.0.0.tgz#0026e37f5454d73e356dfe6564699867c6a7f0ff" + integrity sha512-rSklcAIlf1OmFdyAqbnWTLVelsQ58uvZ66S/ZyawjWqIviTWCjg2PzVGw8WUA+nNuPTqb4wgA+NszrJ+08LlgQ== + +is-number@^7.0.0: + version "7.0.0" + resolved "https://registry.yarnpkg.com/is-number/-/is-number-7.0.0.tgz#7535345b896734d5f80c4d06c50955527a14f12b" + integrity sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng== + +is-obj@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/is-obj/-/is-obj-1.0.1.tgz#3e4729ac1f5fde025cd7d83a896dab9f4f67db0f" + integrity sha1-PkcprB9f3gJc19g6iW2rn09n2w8= + +is-object@^1.0.1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/is-object/-/is-object-1.0.2.tgz#a56552e1c665c9e950b4a025461da87e72f86fcf" + integrity sha512-2rRIahhZr2UWb45fIOuvZGpFtz0TyOZLf32KxBbSoUCeZR495zCKlWUKKUByk3geS2eAs7ZAABt0Y/Rx0GiQGA== + +is-plain-obj@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/is-plain-obj/-/is-plain-obj-1.1.0.tgz#71a50c8429dfca773c92a390a4a03b39fcd51d3e" + integrity sha1-caUMhCnfync8kqOQpKA7OfzVHT4= + +is-posix-bracket@^0.1.0: + version "0.1.1" + resolved "https://registry.yarnpkg.com/is-posix-bracket/-/is-posix-bracket-0.1.1.tgz#3334dc79774368e92f016e6fbc0a88f5cd6e6bc4" + integrity sha1-MzTceXdDaOkvAW5vvAqI9c1ua8Q= + +is-primitive@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/is-primitive/-/is-primitive-2.0.0.tgz#207bab91638499c07b2adf240a41a87210034575" + integrity sha1-IHurkWOEmcB7Kt8kCkGochADRXU= + +is-promise@4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/is-promise/-/is-promise-4.0.0.tgz#42ff9f84206c1991d26debf520dd5c01042dd2f3" + integrity sha512-hvpoI6korhJMnej285dSg6nu1+e6uxs7zG3BYAm5byqDsgJNWwxzM6z6iZiAgQR4TJ30JmBTOwqZUw3WlyH3AQ== + +is-promise@~1, is-promise@~1.0.0: + version "1.0.1" + resolved 
"https://registry.yarnpkg.com/is-promise/-/is-promise-1.0.1.tgz#31573761c057e33c2e91aab9e96da08cefbe76e5" + integrity sha512-mjWH5XxnhMA8cFnDchr6qRP9S/kLntKuEfIYku+PaN1CnS8v+OG9O/BKpRCVRJvpIkgAZm0Pf5Is3iSSOILlcg== + +is-pull-stream@0.0.0: + version "0.0.0" + resolved "https://registry.yarnpkg.com/is-pull-stream/-/is-pull-stream-0.0.0.tgz#a3bc3d1c6d3055151c46bde6f399efed21440ca9" + integrity sha512-NWLwqCc95I6m8FZDYLAmVJc9Xgk8O+8pPOoDKFTC293FH4S7FBcbLCw3WWPCdiT8uUSdzPy47VM08WPDMJJrag== + +is-regex@^1.1.2: + version "1.1.2" + resolved "https://registry.yarnpkg.com/is-regex/-/is-regex-1.1.2.tgz#81c8ebde4db142f2cf1c53fc86d6a45788266251" + integrity sha512-axvdhb5pdhEVThqJzYXwMlVuZwC+FF2DpcOhTS+y/8jVq4trxyPgfcwIxIKiyeuLlSQYKkmUaPQJ8ZE4yNKXDg== + dependencies: + call-bind "^1.0.2" + has-symbols "^1.0.1" + +is-regex@^1.1.4: + version "1.1.4" + resolved "https://registry.yarnpkg.com/is-regex/-/is-regex-1.1.4.tgz#eef5663cd59fa4c0ae339505323df6854bb15958" + integrity sha512-kvRdxDsxZjhzUX07ZnLydzS1TU/TJlTUHHY4YLL87e37oUA49DfkLqgy+VjFocowy29cKvcSiu+kIv728jTTVg== + dependencies: + call-bind "^1.0.2" + has-tostringtag "^1.0.0" + +is-retry-allowed@^1.0.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/is-retry-allowed/-/is-retry-allowed-1.2.0.tgz#d778488bd0a4666a3be8a1482b9f2baafedea8b4" + integrity sha512-RUbUeKwvm3XG2VYamhJL1xFktgjvPzL0Hq8C+6yrWIswDy3BIXGqCxhxkc30N9jqK311gVU137K8Ei55/zVJRg== + +is-set@^2.0.2: + version "2.0.2" + resolved "https://registry.yarnpkg.com/is-set/-/is-set-2.0.2.tgz#90755fa4c2562dc1c5d4024760d6119b94ca18ec" + integrity sha512-+2cnTEZeY5z/iXGbLhPrOAaK/Mau5k5eXq9j14CpRTftq0pAJu2MwVRSZhyZWBzx3o6X795Lz6Bpb6R0GKf37g== + +is-shared-array-buffer@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/is-shared-array-buffer/-/is-shared-array-buffer-1.0.1.tgz#97b0c85fbdacb59c9c446fe653b82cf2b5b7cfe6" + integrity sha512-IU0NmyknYZN0rChcKhRO1X8LYz5Isj/Fsqh8NJOSf+N/hCOTwy29F32Ik7a+QszE63IdvmwdTPDd6cZ5pg4cwA== + +is-stream@^1.0.0, is-stream@^1.0.1: + version "1.1.0" + resolved "https://registry.yarnpkg.com/is-stream/-/is-stream-1.1.0.tgz#12d4a3dd4e68e0b79ceb8dbc84173ae80d91ca44" + integrity sha1-EtSj3U5o4Lec6428hBc66A2RykQ= + +is-stream@^2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/is-stream/-/is-stream-2.0.1.tgz#fac1e3d53b97ad5a9d0ae9cef2389f5810a5c077" + integrity sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg== + +is-string@^1.0.5, is-string@^1.0.7: + version "1.0.7" + resolved "https://registry.yarnpkg.com/is-string/-/is-string-1.0.7.tgz#0dd12bf2006f255bb58f695110eff7491eebc0fd" + integrity sha512-tE2UXzivje6ofPW7l23cjDOMa09gb7xlAqG6jG5ej6uPV32TlWP3NKPigtaGeHNu9fohccRYvIiZMfOOnOYUtg== + dependencies: + has-tostringtag "^1.0.0" + +is-symbol@^1.0.2, is-symbol@^1.0.3: + version "1.0.4" + resolved "https://registry.yarnpkg.com/is-symbol/-/is-symbol-1.0.4.tgz#a6dac93b635b063ca6872236de88910a57af139c" + integrity sha512-C/CPBqKWnvdcxqIARxyOh4v1UUEOCHpgDa0WYgpKDFMszcrPcffg5uhwSgPCLD2WWxmq6isisz87tzT01tuGhg== + dependencies: + has-symbols "^1.0.2" + +is-typed-array@^1.1.3: + version "1.1.5" + resolved "https://registry.yarnpkg.com/is-typed-array/-/is-typed-array-1.1.5.tgz#f32e6e096455e329eb7b423862456aa213f0eb4e" + integrity sha512-S+GRDgJlR3PyEbsX/Fobd9cqpZBuvUS+8asRqYDMLCb2qMzt1oz5m5oxQCxOgUDxiWsOVNi4yaF+/uvdlHlYug== + dependencies: + available-typed-arrays "^1.0.2" + call-bind "^1.0.2" + es-abstract "^1.18.0-next.2" + foreach "^2.0.5" + has-symbols "^1.0.1" + +is-typedarray@^1.0.0, 
is-typedarray@~1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/is-typedarray/-/is-typedarray-1.0.0.tgz#e479c80858df0c1b11ddda6940f96011fcda4a9a" + integrity sha1-5HnICFjfDBsR3dppQPlgEfzaSpo= + +is-upper-case@^1.1.0: + version "1.1.2" + resolved "https://registry.yarnpkg.com/is-upper-case/-/is-upper-case-1.1.2.tgz#8d0b1fa7e7933a1e58483600ec7d9661cbaf756f" + integrity sha1-jQsfp+eTOh5YSDYA7H2WYcuvdW8= + dependencies: + upper-case "^1.1.0" + +is-utf8@^0.2.0: + version "0.2.1" + resolved "https://registry.yarnpkg.com/is-utf8/-/is-utf8-0.2.1.tgz#4b0da1442104d1b336340e80797e865cf39f7d72" + integrity sha1-Sw2hRCEE0bM2NA6AeX6GXPOffXI= + +is-valid-glob@^0.3.0: + version "0.3.0" + resolved "https://registry.yarnpkg.com/is-valid-glob/-/is-valid-glob-0.3.0.tgz#d4b55c69f51886f9b65c70d6c2622d37e29f48fe" + integrity sha1-1LVcafUYhvm2XHDWwmItN+KfSP4= + +is-weakref@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/is-weakref/-/is-weakref-1.0.1.tgz#842dba4ec17fa9ac9850df2d6efbc1737274f2a2" + integrity sha512-b2jKc2pQZjaeFYWEf7ScFj+Be1I+PXmlu572Q8coTXZ+LD/QQZ7ShPMst8h16riVgyXTQwUsFEl74mDvc/3MHQ== + dependencies: + call-bind "^1.0.0" + +isarray@0.0.1: + version "0.0.1" + resolved "https://registry.yarnpkg.com/isarray/-/isarray-0.0.1.tgz#8a18acfca9a8f4177e09abfc6038939b05d1eedf" + integrity sha512-D2S+3GLxWH+uhrNEcoh/fnmYeP8E8/zHl644d/jdA0g2uyXvy3sb0qxotE+ne0LtccHknQzWwZEzhak7oJ0COQ== + +isarray@1.0.0, isarray@~1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/isarray/-/isarray-1.0.0.tgz#bb935d48582cba168c06834957a54a3e07124f11" + integrity sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE= + +isarray@^2.0.5: + version "2.0.5" + resolved "https://registry.yarnpkg.com/isarray/-/isarray-2.0.5.tgz#8af1e4c1221244cc62459faf38940d4e644a5723" + integrity sha512-xHjhDr3cNBK0BzdUJSPXZntQUx/mwMS5Rw4A7lPJ90XGAO6ISP/ePDNuo0vhqOZU+UD5JoodwCAAoZQd3FeAKw== + +isexe@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/isexe/-/isexe-2.0.0.tgz#e8fbf374dc556ff8947a10dcb0572d633f2cfa10" + integrity sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw== + +iso-random-stream@^1.1.0: + version "1.1.2" + resolved "https://registry.yarnpkg.com/iso-random-stream/-/iso-random-stream-1.1.2.tgz#c703da2c518db573277c5678cc43c5298283d64c" + integrity sha512-7y0tsBBgQs544iTYjyrMp5xvgrbYR8b+plQq1Bryp+03p0LssrxC9C1M0oHv4QESDt7d95c74XvMk/yawKqX+A== + dependencies: + buffer "^6.0.3" + readable-stream "^3.4.0" + +iso-stream-http@~0.1.2: + version "0.1.2" + resolved "https://registry.yarnpkg.com/iso-stream-http/-/iso-stream-http-0.1.2.tgz#b3dfea4c9f23ff26d078d40c539cfc0dfebacd37" + integrity sha512-oHEDNOysIMTNypbg2f1SlydqRBvjl4ZbSE9+0awVxnkx3K2stGTFwB/kpVqnB6UEfF8QD36kAjDwZvqyXBLMnQ== + dependencies: + builtin-status-codes "^3.0.0" + inherits "^2.0.1" + readable-stream "^3.1.1" + +iso-url@~0.4.6, iso-url@~0.4.7: + version "0.4.7" + resolved "https://registry.yarnpkg.com/iso-url/-/iso-url-0.4.7.tgz#de7e48120dae46921079fe78f325ac9e9217a385" + integrity sha512-27fFRDnPAMnHGLq36bWTpKET+eiXct3ENlCcdcMdk+mjXrb2kw3mhBUg1B7ewAC0kVzlOPhADzQgz1SE6Tglog== + +isobject@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/isobject/-/isobject-2.1.0.tgz#f065561096a3f1da2ef46272f815c840d87e0c89" + integrity sha1-8GVWEJaj8dou9GJy+BXIQNh+DIk= + dependencies: + isarray "1.0.0" + +isobject@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/isobject/-/isobject-4.0.0.tgz#3f1c9155e73b192022a80819bacd0343711697b0" + integrity 
sha512-S/2fF5wH8SJA/kmwr6HYhK/RI/OkhD84k8ntalo0iJjZikgq1XFvR5M8NPT1x5F7fBwCG3qHfnzeP/Vh/ZxCUA== + +isomorphic-ws@4.0.1, isomorphic-ws@^4.0.1: + version "4.0.1" + resolved "https://registry.yarnpkg.com/isomorphic-ws/-/isomorphic-ws-4.0.1.tgz#55fd4cd6c5e6491e76dc125938dd863f5cd4f2dc" + integrity sha512-BhBvN2MBpWTaSHdWRb/bwdZJ1WaehQ2L1KngkCkfLUGF0mAWAT1sQUQacEmQ0jXkFw/czDXPNQSL5u2/Krsz1w== + +isstream@~0.1.2: + version "0.1.2" + resolved "https://registry.yarnpkg.com/isstream/-/isstream-0.1.2.tgz#47e63f7af55afa6f92e1500e690eb8b8529c099a" + integrity sha512-Yljz7ffyPbrLpLngrMtZ7NduUgVvi6wG9RJ9IUcyCd59YQ911PBJphODUcbOVbqYfxe1wuYf/LJ8PauMRwsM/g== + +isurl@^1.0.0-alpha5: + version "1.0.0" + resolved "https://registry.yarnpkg.com/isurl/-/isurl-1.0.0.tgz#b27f4f49f3cdaa3ea44a0a5b7f3462e6edc39d67" + integrity sha512-1P/yWsxPlDtn7QeRD+ULKQPaIaN6yF368GZ2vDfv0AL0NwpStafjWCDDdn0k8wgFMWpVAqG7oJhxHnlud42i9w== + dependencies: + has-to-string-tag-x "^1.2.0" + is-object "^1.0.1" + +iterable-ndjson@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/iterable-ndjson/-/iterable-ndjson-1.1.0.tgz#36f7e8a5bb04fd087d384f29e44fc4280fc014fc" + integrity sha512-OOp1Lb0o3k5MkXHx1YaIY5Z0ELosZfTnBaas9f8opJVcZGBIONA2zY/6CYE+LKkqrSDooIneZbrBGgOZnHPkrg== + dependencies: + string_decoder "^1.2.0" + +iterall@^1.1.3, iterall@^1.2.1, iterall@^1.3.0: + version "1.3.0" + resolved "https://registry.yarnpkg.com/iterall/-/iterall-1.3.0.tgz#afcb08492e2915cbd8a0884eb93a8c94d0d72fea" + integrity sha512-QZ9qOMdF+QLHxy1QIpUHUU1D5pS2CG2P69LF6L6CPjPYA/XMOmKV3PZpawHoAjHNyB0swdVTRxdYT4tbBbxqwg== + +iterate-iterator@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/iterate-iterator/-/iterate-iterator-1.0.1.tgz#1693a768c1ddd79c969051459453f082fe82e9f6" + integrity sha512-3Q6tudGN05kbkDQDI4CqjaBf4qf85w6W6GnuZDtUVYwKgtC1q8yxYX7CZed7N+tLzQqS6roujWvszf13T+n9aw== + +iterate-value@^1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/iterate-value/-/iterate-value-1.0.2.tgz#935115bd37d006a52046535ebc8d07e9c9337f57" + integrity sha512-A6fMAio4D2ot2r/TYzr4yUWrmwNdsN5xL7+HUiyACE4DXm+q8HtPcnFTp+NnW3k4N05tZ7FVYFFb2CR13NxyHQ== + dependencies: + es-get-iterator "^1.0.2" + iterate-iterator "^1.0.1" + +jayson@3.6.6: + version "3.6.6" + resolved "https://registry.yarnpkg.com/jayson/-/jayson-3.6.6.tgz#189984f624e398f831bd2be8e8c80eb3abf764a1" + integrity sha512-f71uvrAWTtrwoww6MKcl9phQTC+56AopLyEenWvKVAIMz+q0oVGj6tenLZ7Z6UiPBkJtKLj4kt0tACllFQruGQ== + dependencies: + "@types/connect" "^3.4.33" + "@types/express-serve-static-core" "^4.17.9" + "@types/lodash" "^4.14.159" + "@types/node" "^12.12.54" + "@types/ws" "^7.4.4" + JSONStream "^1.3.5" + commander "^2.20.3" + delay "^5.0.0" + es6-promisify "^5.0.0" + eyes "^0.1.8" + isomorphic-ws "^4.0.1" + json-stringify-safe "^5.0.1" + lodash "^4.17.20" + uuid "^8.3.2" + ws "^7.4.5" + +js-sha3@0.5.7, js-sha3@^0.5.7: + version "0.5.7" + resolved "https://registry.yarnpkg.com/js-sha3/-/js-sha3-0.5.7.tgz#0d4ffd8002d5333aabaf4a23eed2f6374c9f28e7" + integrity sha1-DU/9gALVMzqrr0oj7tL2N0yfKOc= + +js-sha3@0.8.0, js-sha3@^0.8.0, js-sha3@~0.8.0: + version "0.8.0" + resolved "https://registry.yarnpkg.com/js-sha3/-/js-sha3-0.8.0.tgz#b9b7a5da73afad7dedd0f8c463954cbde6818840" + integrity sha512-gF1cRrHhIzNfToc802P800N8PpXS+evLLXfsVpowqmAFR9uwbi89WvXg2QspOmXL8QL86J4T1EpFu+yUkwJY3Q== + +"js-tokens@^3.0.0 || ^4.0.0", js-tokens@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/js-tokens/-/js-tokens-4.0.0.tgz#19203fb59991df98e3a287050d4647cdeaf32499" + integrity 
sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ== + +js-tokens@^3.0.2: + version "3.0.2" + resolved "https://registry.yarnpkg.com/js-tokens/-/js-tokens-3.0.2.tgz#9866df395102130e38f7f996bceb65443209c25b" + integrity sha1-mGbfOVECEw449/mWvOtlRDIJwls= + +js-yaml@3.13.1: + version "3.13.1" + resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-3.13.1.tgz#aff151b30bfdfa8e49e05da22e7415e9dfa37847" + integrity sha512-YfbcO7jXDdyj0DGxYVSlSeQNHbD7XPWvrVWeVUujrQEoZzWJIRrCPoyk6kL6IAjAG2IolMK4T0hNUe0HOUs5Jw== + dependencies: + argparse "^1.0.7" + esprima "^4.0.0" + +js-yaml@3.14.0: + version "3.14.0" + resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-3.14.0.tgz#a7a34170f26a21bb162424d8adacb4113a69e482" + integrity sha512-/4IbIeHcD9VMHFqDR/gQ7EdZdLimOvW2DdcxFjdyyZ9NsbS+ccrXqVWDtab/lRl5AlUqmpBx8EhPaWR+OtY17A== + dependencies: + argparse "^1.0.7" + esprima "^4.0.0" + +jsan@^3.1.13: + version "3.1.13" + resolved "https://registry.yarnpkg.com/jsan/-/jsan-3.1.13.tgz#4de8c7bf8d1cfcd020c313d438f930cec4b91d86" + integrity sha512-9kGpCsGHifmw6oJet+y8HaCl14y7qgAsxVdV3pCHDySNR3BfDC30zgkssd7x5LRVAT22dnpbe9JdzzmXZnq9/g== + +jsbn@~0.1.0: + version "0.1.1" + resolved "https://registry.yarnpkg.com/jsbn/-/jsbn-0.1.1.tgz#a5e654c2e5a2deb5f201d96cefbca80c0ef2f513" + integrity sha512-UVU9dibq2JcFWxQPA6KCqj5O42VOmAY3zQUfEKxU0KpTGXwNoCjkX1e13eHNvw/xPynt6pU0rZ1htjWTNTSXsg== + +jsdom@^7.0.2: + version "7.2.2" + resolved "https://registry.yarnpkg.com/jsdom/-/jsdom-7.2.2.tgz#40b402770c2bda23469096bee91ab675e3b1fc6e" + integrity sha1-QLQCdwwr2iNGkJa+6Rq2deOx/G4= + dependencies: + abab "^1.0.0" + acorn "^2.4.0" + acorn-globals "^1.0.4" + cssom ">= 0.3.0 < 0.4.0" + cssstyle ">= 0.2.29 < 0.3.0" + escodegen "^1.6.1" + nwmatcher ">= 1.3.7 < 2.0.0" + parse5 "^1.5.1" + request "^2.55.0" + sax "^1.1.4" + symbol-tree ">= 3.1.0 < 4.0.0" + tough-cookie "^2.2.0" + webidl-conversions "^2.0.0" + whatwg-url-compat "~0.6.5" + xml-name-validator ">= 2.0.1 < 3.0.0" + +jsesc@^1.3.0: + version "1.3.0" + resolved "https://registry.yarnpkg.com/jsesc/-/jsesc-1.3.0.tgz#46c3fec8c1892b12b0833db9bc7622176dbab34b" + integrity sha1-RsP+yMGJKxKwgz25vHYiF226s0s= + +jsesc@^2.5.1: + version "2.5.2" + resolved "https://registry.yarnpkg.com/jsesc/-/jsesc-2.5.2.tgz#80564d2e483dacf6e8ef209650a67df3f0c283a4" + integrity sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA== + +json-buffer@3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/json-buffer/-/json-buffer-3.0.0.tgz#5b1f397afc75d677bde8bcfc0e47e1f9a3d9a898" + integrity sha1-Wx85evx11ne96Lz8Dkfh+aPZqJg= + +json-parse-even-better-errors@^2.3.0: + version "2.3.1" + resolved "https://registry.yarnpkg.com/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz#7c47805a94319928e05777405dc12e1f7a4ee02d" + integrity sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w== + +json-pointer@^0.6.0: + version "0.6.1" + resolved "https://registry.yarnpkg.com/json-pointer/-/json-pointer-0.6.1.tgz#3c6caa6ac139e2599f5a1659d39852154015054d" + integrity sha512-3OvjqKdCBvH41DLpV4iSt6v2XhZXV1bPB4OROuknvUXI7ZQNofieCPkmE26stEJ9zdQuvIxDHCuYhfgxFAAs+Q== + dependencies: + foreach "^2.0.4" + +json-rpc-engine@^5.1.3: + version "5.4.0" + resolved "https://registry.yarnpkg.com/json-rpc-engine/-/json-rpc-engine-5.4.0.tgz#75758609d849e1dba1e09021ae473f3ab63161e5" + integrity sha512-rAffKbPoNDjuRnXkecTjnsE3xLLrb00rEkdgalINhaYVYIxDwWtvYBr9UFbhTvPB1B2qUOLoFd/cV6f4Q7mh7g== + 
dependencies: + eth-rpc-errors "^3.0.0" + safe-event-emitter "^1.0.1" + +json-rpc-random-id@^1.0.0, json-rpc-random-id@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/json-rpc-random-id/-/json-rpc-random-id-1.0.1.tgz#ba49d96aded1444dbb8da3d203748acbbcdec8c8" + integrity sha1-uknZat7RRE27jaPSA3SKy7zeyMg= + +json-schema-traverse@^0.4.1: + version "0.4.1" + resolved "https://registry.yarnpkg.com/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz#69f6a87d9513ab8bb8fe63bdb0979c448e684660" + integrity sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg== + +json-schema@0.4.0: + version "0.4.0" + resolved "https://registry.yarnpkg.com/json-schema/-/json-schema-0.4.0.tgz#f7de4cf6efab838ebaeb3236474cbba5a1930ab5" + integrity sha512-es94M3nTIfsEPisRafak+HDLfHXnKBhV3vU5eqPcS3flIWqcxJWgXHXiey3YrpaNsanY5ei1VoYEbOzijuq9BA== + +json-stable-stringify-without-jsonify@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz#9db7b59496ad3f3cfef30a75142d2d930ad72651" + integrity sha1-nbe1lJatPzz+8wp1FC0tkwrXJlE= + +json-stable-stringify@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/json-stable-stringify/-/json-stable-stringify-1.0.1.tgz#9a759d39c5f2ff503fd5300646ed445f88c4f9af" + integrity sha1-mnWdOcXy/1A/1TAGRu1EX4jE+a8= + dependencies: + jsonify "~0.0.0" + +json-stringify-safe@^5.0.1, json-stringify-safe@~5.0.1: + version "5.0.1" + resolved "https://registry.yarnpkg.com/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz#1296a2d58fd45f19a0f6ce01d65701e2c735b6eb" + integrity sha512-ZClg6AaYvamvYEE82d3Iyd3vSSIjQ+odgjaTzRuO3s7toCdFKczob2i0zCh7JE8kWn17yvAWhUVxvqGwUalsRA== + +json-text-sequence@~0.1.0: + version "0.1.1" + resolved "https://registry.yarnpkg.com/json-text-sequence/-/json-text-sequence-0.1.1.tgz#a72f217dc4afc4629fff5feb304dc1bd51a2f3d2" + integrity sha512-L3mEegEWHRekSHjc7+sc8eJhba9Clq1PZ8kMkzf8OxElhXc8O4TS5MwcVlj9aEbm5dr81N90WHC5nAz3UO971w== + dependencies: + delimit-stream "0.1.0" + +json5@^0.5.1: + version "0.5.1" + resolved "https://registry.yarnpkg.com/json5/-/json5-0.5.1.tgz#1eade7acc012034ad84e2396767ead9fa5495821" + integrity sha1-Hq3nrMASA0rYTiOWdn6tn6VJWCE= + +json5@^2.1.2: + version "2.2.0" + resolved "https://registry.yarnpkg.com/json5/-/json5-2.2.0.tgz#2dfefe720c6ba525d9ebd909950f0515316c89a3" + integrity sha512-f+8cldu7X/y7RAJurMEJmdoKXGB/X550w2Nr3tTbezL6RwEE/iMcm+tZnXeoZtKuOq6ft8+CqzEkrIgx1fPoQA== + dependencies: + minimist "^1.2.5" + +jsondown@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/jsondown/-/jsondown-1.0.0.tgz#c5cc5cda65f515d2376136a104b5f535534f26e3" + integrity sha512-p6XxPaq59aXwcdDQV3ISMA5xk+1z6fJuctcwwSdR9iQgbYOcIrnknNrhcMGG+0FaUfKHGkdDpQNaZrovfBoyOw== + dependencies: + memdown "1.4.1" + mkdirp "0.5.1" + +jsonfile@^2.1.0: + version "2.4.0" + resolved "https://registry.yarnpkg.com/jsonfile/-/jsonfile-2.4.0.tgz#3736a2b428b87bbda0cc83b53fa3d633a35c2ae8" + integrity sha1-NzaitCi4e72gzIO1P6PWM6NcKug= + optionalDependencies: + graceful-fs "^4.1.6" + +jsonfile@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/jsonfile/-/jsonfile-4.0.0.tgz#8771aae0799b64076b76640fca058f9c10e33ecb" + integrity sha1-h3Gq4HmbZAdrdmQPygWPnBDjPss= + optionalDependencies: + graceful-fs "^4.1.6" + +jsonfile@^6.0.1: + version "6.1.0" + resolved "https://registry.yarnpkg.com/jsonfile/-/jsonfile-6.1.0.tgz#bc55b2634793c679ec6403094eb13698a6ec0aae" + integrity 
sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ== + dependencies: + universalify "^2.0.0" + optionalDependencies: + graceful-fs "^4.1.6" + +jsonify@~0.0.0: + version "0.0.0" + resolved "https://registry.yarnpkg.com/jsonify/-/jsonify-0.0.0.tgz#2c74b6ee41d93ca51b7b5aaee8f503631d252a73" + integrity sha1-LHS27kHZPKUbe1qu6PUDYx0lKnM= + +jsonparse@^1.2.0: + version "1.3.1" + resolved "https://registry.yarnpkg.com/jsonparse/-/jsonparse-1.3.1.tgz#3f4dae4a91fac315f71062f8521cc239f1366280" + integrity sha512-POQXvpdL69+CluYsillJ7SUhKvytYjW9vG/GKpnf+xP8UWgYEM/RaMzHHofbALDiKbbP1W8UEYmgGl39WkPZsg== + +jsprim@^1.2.2: + version "1.4.2" + resolved "https://registry.yarnpkg.com/jsprim/-/jsprim-1.4.2.tgz#712c65533a15c878ba59e9ed5f0e26d5b77c5feb" + integrity sha512-P2bSOMAc/ciLz6DzgjVlGJP9+BrJWu5UDGK70C2iweC5QBIeFf0ZXRvGjEj2uYgrY2MkAAhsSWHDWlFtEroZWw== + dependencies: + assert-plus "1.0.0" + extsprintf "1.3.0" + json-schema "0.4.0" + verror "1.10.0" + +just-kebab-case@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/just-kebab-case/-/just-kebab-case-1.1.0.tgz#ebe854fde84b0afa4e597fcd870b12eb3c026755" + integrity sha512-QkuwuBMQ9BQHMUEkAtIA4INLrkmnnveqlFB1oFi09gbU0wBdZo6tTnyxNWMR84zHxBuwK7GLAwqN8nrvVxOLTA== + +just-map-keys@^1.1.0: + version "1.2.1" + resolved "https://registry.yarnpkg.com/just-map-keys/-/just-map-keys-1.2.1.tgz#ef6e16133b7d34329962dfae9101d581abb1b143" + integrity sha512-Dmyz1Cy2SWM+PpqDPB1kdDglyexdzMthnAsvOIE9w4OPj8NDRuY1mh20x/JfG5w6fCGw9F0WmcofJhYZ4MiuyA== + +keccak@^3.0.0: + version "3.0.2" + resolved "https://registry.yarnpkg.com/keccak/-/keccak-3.0.2.tgz#4c2c6e8c54e04f2670ee49fa734eb9da152206e0" + integrity sha512-PyKKjkH53wDMLGrvmRGSNWgmSxZOUqbnXwKL9tmgbFYA1iAYqW21kfR7mZXV0MlESiefxQQE9X9fTa3X+2MPDQ== + dependencies: + node-addon-api "^2.0.0" + node-gyp-build "^4.2.0" + readable-stream "^3.6.0" + +keypair@^1.0.1: + version "1.0.4" + resolved "https://registry.yarnpkg.com/keypair/-/keypair-1.0.4.tgz#a749a45f388593f3950f18b3757d32a93bd8ce83" + integrity sha512-zwhgOhhniaL7oxMgUMKKw5219PWWABMO+dgMnzJOQ2/5L3XJtTJGhW2PEXlxXj9zaccdReZJZ83+4NPhVfNVDg== + +keyv@^3.0.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/keyv/-/keyv-3.1.0.tgz#ecc228486f69991e49e9476485a5be1e8fc5c4d9" + integrity sha512-9ykJ/46SN/9KPM/sichzQ7OvXyGDYKGTaDlKMGCAlg2UK8KRy4jb0d8sFc+0Tt0YYnThq8X2RZgCg74RPxgcVA== + dependencies: + json-buffer "3.0.0" + +kind-of@^3.0.2: + version "3.2.2" + resolved "https://registry.yarnpkg.com/kind-of/-/kind-of-3.2.2.tgz#31ea21a734bab9bbb0f32466d893aea51e4a3c64" + integrity sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ= + dependencies: + is-buffer "^1.1.5" + +kind-of@^6.0.0, kind-of@^6.0.2: + version "6.0.3" + resolved "https://registry.yarnpkg.com/kind-of/-/kind-of-6.0.3.tgz#07c05034a6c349fa06e24fa35aa76db4580ce4dd" + integrity sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw== + +klaw@^1.0.0: + version "1.3.1" + resolved "https://registry.yarnpkg.com/klaw/-/klaw-1.3.1.tgz#4088433b46b3b1ba259d78785d8e96f73ba02439" + integrity sha1-QIhDO0azsbolnXh4XY6W9zugJDk= + optionalDependencies: + graceful-fs "^4.1.9" + +ky-universal@^0.2.2: + version "0.2.2" + resolved "https://registry.yarnpkg.com/ky-universal/-/ky-universal-0.2.2.tgz#7a36e1a75641a98f878157463513965f799f5bfe" + integrity sha512-fb32o/fKy/ux2ALWa9HU2hvGtfOq7/vn2nH0FpVE+jwNzyTeORlAbj3Fiw+WLMbUlmVqZIWupnLZ2USHvqwZHw== + dependencies: + abort-controller "^3.0.0" + node-fetch "^2.3.0" + +ky@^0.11.2: + version "0.11.2" + resolved 
"https://registry.yarnpkg.com/ky/-/ky-0.11.2.tgz#4ffe6621d9d9ab61bf0f5500542e3a96d1ba0815" + integrity sha512-5Aou5BWue5/mkPqIRqzSWW+0Hkl403pr/2AIrCKYw7cVl/Xoe8Xe4KLBO0PRjbz7GnRe1/8wW1KhqQNFFE7/GQ== + +lazy-debug-legacy@0.0.X: + version "0.0.1" + resolved "https://registry.yarnpkg.com/lazy-debug-legacy/-/lazy-debug-legacy-0.0.1.tgz#537716c0776e4cf79e3ed1b621f7658c2911b1b1" + integrity sha1-U3cWwHduTPeePtG2IfdljCkRsbE= + +lazystream@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/lazystream/-/lazystream-1.0.0.tgz#f6995fe0f820392f61396be89462407bb77168e4" + integrity sha1-9plf4PggOS9hOWvolGJAe7dxaOQ= + dependencies: + readable-stream "^2.0.5" + +lcid@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/lcid/-/lcid-1.0.0.tgz#308accafa0bc483a3867b4b6f2b9506251d1b835" + integrity sha1-MIrMr6C8SDo4Z7S28rlQYlHRuDU= + dependencies: + invert-kv "^1.0.0" + +level-codec@9.0.1: + version "9.0.1" + resolved "https://registry.yarnpkg.com/level-codec/-/level-codec-9.0.1.tgz#042f4aa85e56d4328ace368c950811ba802b7247" + integrity sha512-ajFP0kJ+nyq4i6kptSM+mAvJKLOg1X5FiFPtLG9M5gCEZyBmgDi3FkDrvlMkEzrUn1cWxtvVmrvoS4ASyO/q+Q== + +level-codec@9.0.2, level-codec@^9.0.0: + version "9.0.2" + resolved "https://registry.yarnpkg.com/level-codec/-/level-codec-9.0.2.tgz#fd60df8c64786a80d44e63423096ffead63d8cbc" + integrity sha512-UyIwNb1lJBChJnGfjmO0OR+ezh2iVu1Kas3nvBS/BzGnx79dv6g7unpKIDNPMhfdTEGoc7mC8uAu51XEtX+FHQ== + dependencies: + buffer "^5.6.0" + +level-codec@~7.0.0: + version "7.0.1" + resolved "https://registry.yarnpkg.com/level-codec/-/level-codec-7.0.1.tgz#341f22f907ce0f16763f24bddd681e395a0fb8a7" + integrity sha512-Ua/R9B9r3RasXdRmOtd+t9TCOEIIlts+TN/7XTT2unhDaL6sJn83S3rUyljbr6lVtw49N3/yA0HHjpV6Kzb2aQ== + +level-concat-iterator@~2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/level-concat-iterator/-/level-concat-iterator-2.0.1.tgz#1d1009cf108340252cb38c51f9727311193e6263" + integrity sha512-OTKKOqeav2QWcERMJR7IS9CUo1sHnke2C0gkSmcR7QuEtFNLLzHQAvnMw8ykvEcv0Qtkg0p7FOwP1v9e5Smdcw== + +level-errors@^1.0.3: + version "1.1.2" + resolved "https://registry.yarnpkg.com/level-errors/-/level-errors-1.1.2.tgz#4399c2f3d3ab87d0625f7e3676e2d807deff404d" + integrity sha512-Sw/IJwWbPKF5Ai4Wz60B52yj0zYeqzObLh8k1Tk88jVmD51cJSKWSYpRyhVIvFzZdvsPqlH5wfhp/yxdsaQH4w== + dependencies: + errno "~0.1.1" + +level-errors@^2.0.0, level-errors@~2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/level-errors/-/level-errors-2.0.1.tgz#2132a677bf4e679ce029f517c2f17432800c05c8" + integrity sha512-UVprBJXite4gPS+3VznfgDSU8PTRuVX0NXwoWW50KLxd2yw4Y1t2JUR5In1itQnudZqRMT9DlAM3Q//9NCjCFw== + dependencies: + errno "~0.1.1" + +level-errors@~1.0.3: + version "1.0.5" + resolved "https://registry.yarnpkg.com/level-errors/-/level-errors-1.0.5.tgz#83dbfb12f0b8a2516bdc9a31c4876038e227b859" + integrity sha512-/cLUpQduF6bNrWuAC4pwtUKA5t669pCsCi2XbmojG2tFeOr9j6ShtdDCtFFQO1DRt+EVZhx9gPzP9G2bUaG4ig== + dependencies: + errno "~0.1.1" + +level-iterator-stream@~1.3.0: + version "1.3.1" + resolved "https://registry.yarnpkg.com/level-iterator-stream/-/level-iterator-stream-1.3.1.tgz#e43b78b1a8143e6fa97a4f485eb8ea530352f2ed" + integrity sha1-5Dt4sagUPm+pek9IXrjqUwNS8u0= + dependencies: + inherits "^2.0.1" + level-errors "^1.0.3" + readable-stream "^1.0.33" + xtend "^4.0.0" + +level-iterator-stream@~4.0.0: + version "4.0.2" + resolved "https://registry.yarnpkg.com/level-iterator-stream/-/level-iterator-stream-4.0.2.tgz#7ceba69b713b0d7e22fcc0d1f128ccdc8a24f79c" + integrity 
sha512-ZSthfEqzGSOMWoUGhTXdX9jv26d32XJuHz/5YnuHZzH6wldfWMOVwI9TBtKcya4BKTyTt3XVA0A3cF3q5CY30Q== + dependencies: + inherits "^2.0.4" + readable-stream "^3.4.0" + xtend "^4.0.2" + +level-js@^4.0.0: + version "4.0.2" + resolved "https://registry.yarnpkg.com/level-js/-/level-js-4.0.2.tgz#fa51527fa38b87c4d111b0d0334de47fcda38f21" + integrity sha512-PeGjZsyMG4O89KHiez1zoMJxStnkM+oBIqgACjoo5PJqFiSUUm3GNod/KcbqN5ktyZa8jkG7I1T0P2u6HN9lIg== + dependencies: + abstract-leveldown "~6.0.1" + immediate "~3.2.3" + inherits "^2.0.3" + ltgt "^2.1.2" + typedarray-to-buffer "~3.1.5" + +level-packager@^5.0.0: + version "5.1.1" + resolved "https://registry.yarnpkg.com/level-packager/-/level-packager-5.1.1.tgz#323ec842d6babe7336f70299c14df2e329c18939" + integrity sha512-HMwMaQPlTC1IlcwT3+swhqf/NUO+ZhXVz6TY1zZIIZlIR0YSn8GtAAWmIvKjNY16ZkEg/JcpAuQskxsXqC0yOQ== + dependencies: + encoding-down "^6.3.0" + levelup "^4.3.2" + +level-supports@~1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/level-supports/-/level-supports-1.0.1.tgz#2f530a596834c7301622521988e2c36bb77d122d" + integrity sha512-rXM7GYnW8gsl1vedTJIbzOrRv85c/2uCMpiiCzO2fndd06U/kUXEEU9evYn4zFggBOg36IsBW8LzqIpETwwQzg== + dependencies: + xtend "^4.0.2" + +level-write-stream@1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/level-write-stream/-/level-write-stream-1.0.0.tgz#3f7fbb679a55137c0feb303dee766e12ee13c1dc" + integrity sha1-P3+7Z5pVE3wP6zA97nZuEu4Twdw= + dependencies: + end-stream "~0.1.0" + +level-ws@0.0.0: + version "0.0.0" + resolved "https://registry.yarnpkg.com/level-ws/-/level-ws-0.0.0.tgz#372e512177924a00424b0b43aef2bb42496d228b" + integrity sha1-Ny5RIXeSSgBCSwtDrvK7QkltIos= + dependencies: + readable-stream "~1.0.15" + xtend "~2.1.1" + +level@5.0.1: + version "5.0.1" + resolved "https://registry.yarnpkg.com/level/-/level-5.0.1.tgz#8528cc1ee37ac413270129a1eab938c610be3ccb" + integrity sha512-wcak5OQeA4rURGacqS62R/xNHjCYnJSQDBOlm4KNUGJVE9bWv2B04TclqReYejN+oD65PzD4FsqeWoI5wNC5Lg== + dependencies: + level-js "^4.0.0" + level-packager "^5.0.0" + leveldown "^5.0.0" + opencollective-postinstall "^2.0.0" + +leveldown@5.0.2: + version "5.0.2" + resolved "https://registry.yarnpkg.com/leveldown/-/leveldown-5.0.2.tgz#c8edc2308c8abf893ffc81e66ab6536111cae92c" + integrity sha512-Ib6ygFYBleS8x2gh3C1AkVsdrUShqXpe6jSTnZ6sRycEXKhqVf+xOSkhgSnjidpPzyv0d95LJVFrYQ4NuXAqHA== + dependencies: + abstract-leveldown "~6.0.0" + fast-future "~1.0.2" + napi-macros "~1.8.1" + node-gyp-build "~3.8.0" + +leveldown@^5.0.0: + version "5.6.0" + resolved "https://registry.yarnpkg.com/leveldown/-/leveldown-5.6.0.tgz#16ba937bb2991c6094e13ac5a6898ee66d3eee98" + integrity sha512-iB8O/7Db9lPaITU1aA2txU/cBEXAt4vWwKQRrrWuS6XDgbP4QZGj9BL2aNbwb002atoQ/lIotJkfyzz+ygQnUQ== + dependencies: + abstract-leveldown "~6.2.1" + napi-macros "~2.0.0" + node-gyp-build "~4.1.0" + +levelup@4.0.2: + version "4.0.2" + resolved "https://registry.yarnpkg.com/levelup/-/levelup-4.0.2.tgz#bcb8d28d0a82ee97f1c6d00f20ea6d32c2803c5b" + integrity sha512-cx9PmLENwbGA3svWBEbeO2HazpOSOYSXH4VA+ahVpYyurvD+SDSfURl29VBY2qgyk+Vfy2dJd71SBRckj/EZVA== + dependencies: + deferred-leveldown "~5.0.0" + level-errors "~2.0.0" + level-iterator-stream "~4.0.0" + xtend "~4.0.0" + +levelup@4.4.0, levelup@^4.3.2: + version "4.4.0" + resolved "https://registry.yarnpkg.com/levelup/-/levelup-4.4.0.tgz#f89da3a228c38deb49c48f88a70fb71f01cafed6" + integrity sha512-94++VFO3qN95cM/d6eBXvd894oJE0w3cInq9USsyQzzoJxmiYzPAocNcuGCPGGjoXqDVJcr3C1jzt1TSjyaiLQ== + dependencies: + deferred-leveldown "~5.3.0" + level-errors 
"~2.0.0" + level-iterator-stream "~4.0.0" + level-supports "~1.0.0" + xtend "~4.0.0" + +levelup@^1.2.1: + version "1.3.9" + resolved "https://registry.yarnpkg.com/levelup/-/levelup-1.3.9.tgz#2dbcae845b2bb2b6bea84df334c475533bbd82ab" + integrity sha512-VVGHfKIlmw8w1XqpGOAGwq6sZm2WwWLmlDcULkKWQXEA5EopA8OBNJ2Ck2v6bdk8HeEZSbCSEgzXadyQFm76sQ== + dependencies: + deferred-leveldown "~1.2.1" + level-codec "~7.0.0" + level-errors "~1.0.3" + level-iterator-stream "~1.3.0" + prr "~1.0.1" + semver "~5.4.1" + xtend "~4.0.0" + +levn@~0.3.0: + version "0.3.0" + resolved "https://registry.yarnpkg.com/levn/-/levn-0.3.0.tgz#3b09924edf9f083c0490fdd4c0bc4421e04764ee" + integrity sha1-OwmSTt+fCDwEkP3UwLxEIeBHZO4= + dependencies: + prelude-ls "~1.1.2" + type-check "~0.3.2" + +libp2p-crypto-secp256k1@~0.3.0: + version "0.3.1" + resolved "https://registry.yarnpkg.com/libp2p-crypto-secp256k1/-/libp2p-crypto-secp256k1-0.3.1.tgz#4cbeb857f5cfe5fefb1253e6b2994420c0ca166e" + integrity sha512-evrfK/CeUSd/lcELUdDruyPBvxDmLairth75S32OLl3H+++2m2fV24JEtxzdFS9JH3xEFw0h6JFO8DBa1bP9dA== + dependencies: + async "^2.6.2" + bs58 "^4.0.1" + multihashing-async "~0.6.0" + nodeify "^1.0.1" + safe-buffer "^5.1.2" + secp256k1 "^3.6.2" + +libp2p-crypto@~0.16.1: + version "0.16.4" + resolved "https://registry.yarnpkg.com/libp2p-crypto/-/libp2p-crypto-0.16.4.tgz#fb1a4ba39d56789303947784b5b0d6cefce12fdc" + integrity sha512-II8HxKc9jbmQp34pprlluNxsBCWJDjHRPYJzuRy7ragztNip9Zb7uJ4lCje6gGzz4DNAcHkAUn+GqCIK1592iA== + dependencies: + asmcrypto.js "^2.3.2" + asn1.js "^5.0.1" + async "^2.6.1" + bn.js "^4.11.8" + browserify-aes "^1.2.0" + bs58 "^4.0.1" + iso-random-stream "^1.1.0" + keypair "^1.0.1" + libp2p-crypto-secp256k1 "~0.3.0" + multihashing-async "~0.5.1" + node-forge "^0.10.0" + pem-jwk "^2.0.0" + protons "^1.0.1" + rsa-pem-to-jwk "^1.1.3" + tweetnacl "^1.0.0" + ursa-optional "~0.10.0" + +lines-and-columns@^1.1.6: + version "1.2.4" + resolved "https://registry.yarnpkg.com/lines-and-columns/-/lines-and-columns-1.2.4.tgz#eca284f75d2965079309dc0ad9255abb2ebc1632" + integrity sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg== + +linked-list@0.1.0: + version "0.1.0" + resolved "https://registry.yarnpkg.com/linked-list/-/linked-list-0.1.0.tgz#798b0ff97d1b92a4fd08480f55aea4e9d49d37bf" + integrity sha1-eYsP+X0bkqT9CEgPVa6k6dSdN78= + +load-json-file@^1.0.0, load-json-file@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/load-json-file/-/load-json-file-1.1.0.tgz#956905708d58b4bab4c2261b04f59f31c99374c0" + integrity sha1-lWkFcI1YtLq0wiYbBPWfMcmTdMA= + dependencies: + graceful-fs "^4.1.2" + parse-json "^2.2.0" + pify "^2.0.0" + pinkie-promise "^2.0.0" + strip-bom "^2.0.0" + +locate-path@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/locate-path/-/locate-path-2.0.0.tgz#2b568b265eec944c6d9c0de9c3dbbbca0354cd8e" + integrity sha1-K1aLJl7slExtnA3pw9u7ygNUzY4= + dependencies: + p-locate "^2.0.0" + path-exists "^3.0.0" + +locate-path@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/locate-path/-/locate-path-3.0.0.tgz#dbec3b3ab759758071b58fe59fc41871af21400e" + integrity sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A== + dependencies: + p-locate "^3.0.0" + path-exists "^3.0.0" + +locate-path@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/locate-path/-/locate-path-5.0.0.tgz#1afba396afd676a6d42504d0a67a3a7eb9f62aa0" + integrity 
sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g== + dependencies: + p-locate "^4.1.0" + +locate-path@^6.0.0: + version "6.0.0" + resolved "https://registry.yarnpkg.com/locate-path/-/locate-path-6.0.0.tgz#55321eb309febbc59c4801d931a72452a681d286" + integrity sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw== + dependencies: + p-locate "^5.0.0" + +lodash-es@^4.2.1: + version "4.17.21" + resolved "https://registry.yarnpkg.com/lodash-es/-/lodash-es-4.17.21.tgz#43e626c46e6591b7750beb2b50117390c609e3ee" + integrity sha512-mKnC+QJ9pWVzv+C4/U3rRsHapFfHvQFoFB92e52xeyGMcX6/OlIl78je1u8vePzYZSkkogMPJ2yjxxsb89cxyw== + +lodash._reinterpolate@^3.0.0, lodash._reinterpolate@~3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/lodash._reinterpolate/-/lodash._reinterpolate-3.0.0.tgz#0ccf2d89166af03b3663c796538b75ac6e114d9d" + integrity sha1-DM8tiRZq8Ds2Y8eWU4t1rG4RTZ0= + +lodash.assign@^4.0.3, lodash.assign@^4.0.6: + version "4.2.0" + resolved "https://registry.yarnpkg.com/lodash.assign/-/lodash.assign-4.2.0.tgz#0d99f3ccd7a6d261d19bdaeb9245005d285808e7" + integrity sha1-DZnzzNem0mHRm9rrkkUAXShYCOc= + +lodash.assignin@^4.2.0: + version "4.2.0" + resolved "https://registry.yarnpkg.com/lodash.assignin/-/lodash.assignin-4.2.0.tgz#ba8df5fb841eb0a3e8044232b0e263a8dc6a28a2" + integrity sha1-uo31+4QesKPoBEIysOJjqNxqKKI= + +lodash.assigninwith@^4.0.0: + version "4.2.0" + resolved "https://registry.yarnpkg.com/lodash.assigninwith/-/lodash.assigninwith-4.2.0.tgz#af02c98432ac86d93da695b4be801401971736af" + integrity sha1-rwLJhDKshtk9ppW0voAUAZcXNq8= + +lodash.camelcase@^4.3.0: + version "4.3.0" + resolved "https://registry.yarnpkg.com/lodash.camelcase/-/lodash.camelcase-4.3.0.tgz#b28aa6288a2b9fc651035c7711f65ab6190331a6" + integrity sha512-TwuEnCnxbc3rAvhf/LbG7tJUDzhqXyFnv3dtzLOPgCG/hODL7WFnsbwktkD7yUV0RrreP/l1PALq/YSg6VvjlA== + +lodash.clonedeep@^4.5.0: + version "4.5.0" + resolved "https://registry.yarnpkg.com/lodash.clonedeep/-/lodash.clonedeep-4.5.0.tgz#e23f3f9c4f8fbdde872529c1071857a086e5ccef" + integrity sha1-4j8/nE+Pvd6HJSnBBxhXoIblzO8= + +lodash.debounce@^4.0.8: + version "4.0.8" + resolved "https://registry.yarnpkg.com/lodash.debounce/-/lodash.debounce-4.0.8.tgz#82d79bff30a67c4005ffd5e2515300ad9ca4d7af" + integrity sha1-gteb/zCmfEAF/9XiUVMArZyk168= + +lodash.escaperegexp@^4.1.2: + version "4.1.2" + resolved "https://registry.yarnpkg.com/lodash.escaperegexp/-/lodash.escaperegexp-4.1.2.tgz#64762c48618082518ac3df4ccf5d5886dae20347" + integrity sha1-ZHYsSGGAglGKw99Mz11YhtriA0c= + +lodash.flatmap@^4.5.0: + version "4.5.0" + resolved "https://registry.yarnpkg.com/lodash.flatmap/-/lodash.flatmap-4.5.0.tgz#ef8cbf408f6e48268663345305c6acc0b778702e" + integrity sha1-74y/QI9uSCaGYzRTBcaswLd4cC4= + +lodash.flatten@^4.4.0: + version "4.4.0" + resolved "https://registry.yarnpkg.com/lodash.flatten/-/lodash.flatten-4.4.0.tgz#f31c22225a9632d2bbf8e4addbef240aa765a61f" + integrity sha1-8xwiIlqWMtK7+OSt2+8kCqdlph8= + +lodash.isequal@^4.0.0: + version "4.5.0" + resolved "https://registry.yarnpkg.com/lodash.isequal/-/lodash.isequal-4.5.0.tgz#415c4478f2bcc30120c22ce10ed3226f7d3e18e0" + integrity sha1-QVxEePK8wwEgwizhDtMib30+GOA= + +lodash.kebabcase@^4.1.1: + version "4.1.1" + resolved "https://registry.yarnpkg.com/lodash.kebabcase/-/lodash.kebabcase-4.1.1.tgz#8489b1cb0d29ff88195cceca448ff6d6cc295c36" + integrity sha512-N8XRTIMMqqDgSy4VLKPnJ/+hpGZN+PHQiJnSenYqPaVV/NCqEogTnAdZLQiGKhxX+JCs8waWq2t1XHWKOmlY8g== + +lodash.keys@^4.0.0: + 
version "4.2.0" + resolved "https://registry.yarnpkg.com/lodash.keys/-/lodash.keys-4.2.0.tgz#a08602ac12e4fb83f91fc1fb7a360a4d9ba35205" + integrity sha1-oIYCrBLk+4P5H8H7ejYKTZujUgU= + +lodash.lowercase@^4.3.0: + version "4.3.0" + resolved "https://registry.yarnpkg.com/lodash.lowercase/-/lodash.lowercase-4.3.0.tgz#46515aced4acb0b7093133333af068e4c3b14e9d" + integrity sha512-UcvP1IZYyDKyEL64mmrwoA1AbFu5ahojhTtkOUr1K9dbuxzS9ev8i4TxMMGCqRC9TE8uDaSoufNAXxRPNTseVA== + +lodash.lowerfirst@^4.3.1: + version "4.3.1" + resolved "https://registry.yarnpkg.com/lodash.lowerfirst/-/lodash.lowerfirst-4.3.1.tgz#de3c7b12e02c6524a0059c2f6cb7c5c52655a13d" + integrity sha512-UUKX7VhP1/JL54NXg2aq/E1Sfnjjes8fNYTNkPU8ZmsaVeBvPHKdbNaN79Re5XRL01u6wbq3j0cbYZj71Fcu5w== + +lodash.merge@^4.6.2: + version "4.6.2" + resolved "https://registry.yarnpkg.com/lodash.merge/-/lodash.merge-4.6.2.tgz#558aa53b43b661e1925a0afdfa36a9a1085fe57a" + integrity sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ== + +lodash.pad@^4.5.1: + version "4.5.1" + resolved "https://registry.yarnpkg.com/lodash.pad/-/lodash.pad-4.5.1.tgz#4330949a833a7c8da22cc20f6a26c4d59debba70" + integrity sha512-mvUHifnLqM+03YNzeTBS1/Gr6JRFjd3rRx88FHWUvamVaT9k2O/kXha3yBSOwB9/DTQrSTLJNHvLBBt2FdX7Mg== + +lodash.padend@^4.6.1: + version "4.6.1" + resolved "https://registry.yarnpkg.com/lodash.padend/-/lodash.padend-4.6.1.tgz#53ccba047d06e158d311f45da625f4e49e6f166e" + integrity sha512-sOQs2aqGpbl27tmCS1QNZA09Uqp01ZzWfDUoD+xzTii0E7dSQfRKcRetFwa+uXaxaqL+TKm7CgD2JdKP7aZBSw== + +lodash.padstart@^4.6.1: + version "4.6.1" + resolved "https://registry.yarnpkg.com/lodash.padstart/-/lodash.padstart-4.6.1.tgz#d2e3eebff0d9d39ad50f5cbd1b52a7bce6bb611b" + integrity sha512-sW73O6S8+Tg66eY56DBk85aQzzUJDtpoXFBgELMd5P/SotAguo+1kYO6RuYgXxA4HJH3LFTFPASX6ET6bjfriw== + +lodash.partition@^4.6.0: + version "4.6.0" + resolved "https://registry.yarnpkg.com/lodash.partition/-/lodash.partition-4.6.0.tgz#a38e46b73469e0420b0da1212e66d414be364ba4" + integrity sha1-o45GtzRp4EILDaEhLmbUFL42S6Q= + +lodash.repeat@^4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/lodash.repeat/-/lodash.repeat-4.1.0.tgz#fc7de8131d8c8ac07e4b49f74ffe829d1f2bec44" + integrity sha512-eWsgQW89IewS95ZOcr15HHCX6FVDxq3f2PNUIng3fyzsPev9imFQxIYdFZ6crl8L56UR6ZlGDLcEb3RZsCSSqw== + +lodash.rest@^4.0.0: + version "4.0.5" + resolved "https://registry.yarnpkg.com/lodash.rest/-/lodash.rest-4.0.5.tgz#954ef75049262038c96d1fc98b28fdaf9f0772aa" + integrity sha1-lU73UEkmIDjJbR/Jiyj9r58Hcqo= + +lodash.snakecase@^4.1.1: + version "4.1.1" + resolved "https://registry.yarnpkg.com/lodash.snakecase/-/lodash.snakecase-4.1.1.tgz#39d714a35357147837aefd64b5dcbb16becd8f8d" + integrity sha512-QZ1d4xoBHYUeuouhEq3lk3Uq7ldgyFXGBhg04+oRLnIz8o9T65Eh+8YdroUwn846zchkA9yDsDl5CVVaV2nqYw== + +lodash.sortby@^4.7.0: + version "4.7.0" + resolved "https://registry.yarnpkg.com/lodash.sortby/-/lodash.sortby-4.7.0.tgz#edd14c824e2cc9c1e0b0a1b42bb5210516a42438" + integrity sha1-7dFMgk4sycHgsKG0K7UhBRakJDg= + +lodash.startcase@^4.4.0: + version "4.4.0" + resolved "https://registry.yarnpkg.com/lodash.startcase/-/lodash.startcase-4.4.0.tgz#9436e34ed26093ed7ffae1936144350915d9add8" + integrity sha512-+WKqsK294HMSc2jEbNgpHpd0JfIBhp7rEV4aqXWqFr6AlXov+SlcgB1Fv01y2kGe3Gc8nMW7VA0SrGuSkRfIEg== + +lodash.sum@^4.0.2: + version "4.0.2" + resolved "https://registry.yarnpkg.com/lodash.sum/-/lodash.sum-4.0.2.tgz#ad90e397965d803d4f1ff7aa5b2d0197f3b4637b" + integrity sha1-rZDjl5ZdgD1PH/eqWy0Bl/O0Y3s= + +lodash.template@4.2.4: + 
version "4.2.4" + resolved "https://registry.yarnpkg.com/lodash.template/-/lodash.template-4.2.4.tgz#d053c19e8e74e38d965bf4fb495d80f109e7f7a4" + integrity sha1-0FPBno50442WW/T7SV2A8Qnn96Q= + dependencies: + lodash._reinterpolate "~3.0.0" + lodash.assigninwith "^4.0.0" + lodash.keys "^4.0.0" + lodash.rest "^4.0.0" + lodash.templatesettings "^4.0.0" + lodash.tostring "^4.0.0" + +lodash.templatesettings@^4.0.0: + version "4.2.0" + resolved "https://registry.yarnpkg.com/lodash.templatesettings/-/lodash.templatesettings-4.2.0.tgz#e481310f049d3cf6d47e912ad09313b154f0fb33" + integrity sha512-stgLz+i3Aa9mZgnjr/O+v9ruKZsPsndy7qPZOchbqk2cnTU1ZaldKK+v7m54WoKIyxiuMZTKT2H81F8BeAc3ZQ== + dependencies: + lodash._reinterpolate "^3.0.0" + +lodash.tostring@^4.0.0: + version "4.1.4" + resolved "https://registry.yarnpkg.com/lodash.tostring/-/lodash.tostring-4.1.4.tgz#560c27d1f8eadde03c2cce198fef5c031d8298fb" + integrity sha1-Vgwn0fjq3eA8LM4Zj+9cAx2CmPs= + +lodash.trim@^4.5.1: + version "4.5.1" + resolved "https://registry.yarnpkg.com/lodash.trim/-/lodash.trim-4.5.1.tgz#36425e7ee90be4aa5e27bcebb85b7d11ea47aa57" + integrity sha512-nJAlRl/K+eiOehWKDzoBVrSMhK0K3A3YQsUNXHQa5yIrKBAhsZgSu3KoAFoFT+mEgiyBHddZ0pRk1ITpIp90Wg== + +lodash.trimend@^4.5.1: + version "4.5.1" + resolved "https://registry.yarnpkg.com/lodash.trimend/-/lodash.trimend-4.5.1.tgz#12804437286b98cad8996b79414e11300114082f" + integrity sha512-lsD+k73XztDsMBKPKvzHXRKFNMohTjoTKIIo4ADLn5dA65LZ1BqlAvSXhR2rPEC3BgAUQnzMnorqDtqn2z4IHA== + +lodash.trimstart@^4.5.1: + version "4.5.1" + resolved "https://registry.yarnpkg.com/lodash.trimstart/-/lodash.trimstart-4.5.1.tgz#8ff4dec532d82486af59573c39445914e944a7f1" + integrity sha512-b/+D6La8tU76L/61/aN0jULWHkT0EeJCmVstPBn/K9MtD2qBW83AsBNrr63dKuWYwVMO7ucv13QNO/Ek/2RKaQ== + +lodash.uppercase@^4.3.0: + version "4.3.0" + resolved "https://registry.yarnpkg.com/lodash.uppercase/-/lodash.uppercase-4.3.0.tgz#c404abfd1469f93931f9bb24cf6cc7d57059bc73" + integrity sha512-+Nbnxkj7s8K5U8z6KnEYPGUOGp3woZbB7Ecs7v3LkkjLQSm2kP9SKIILitN1ktn2mB/tmM9oSlku06I+/lH7QA== + +lodash.upperfirst@^4.3.1: + version "4.3.1" + resolved "https://registry.yarnpkg.com/lodash.upperfirst/-/lodash.upperfirst-4.3.1.tgz#1365edf431480481ef0d1c68957a5ed99d49f7ce" + integrity sha512-sReKOYJIJf74dhJONhU4e0/shzi1trVbSWDOhKYE5XV2O+H7Sb2Dihwuc7xWxVl+DgFPyTqIN3zMfT9cq5iWDg== + +lodash.zipwith@^4.2.0: + version "4.2.0" + resolved "https://registry.yarnpkg.com/lodash.zipwith/-/lodash.zipwith-4.2.0.tgz#afacf03fd2f384af29e263c3c6bda3b80e3f51fd" + integrity sha1-r6zwP9LzhK8p4mPDxr2juA4/Uf0= + +lodash@4.17.21, lodash@^4.1.0, lodash@^4.15.0, lodash@^4.17.11, lodash@^4.17.14, lodash@^4.17.19, lodash@^4.17.20, lodash@^4.17.4, lodash@^4.2.1: + version "4.17.21" + resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.17.21.tgz#679591c564c3bffaae8454cf0b3df370c3d6911c" + integrity sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg== + +log-symbols@4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/log-symbols/-/log-symbols-4.0.0.tgz#69b3cc46d20f448eccdb75ea1fa733d9e821c920" + integrity sha512-FN8JBzLx6CzeMrB0tg6pqlGU1wCrXW+ZXGH481kfsBqer0hToTIiHdjH4Mq8xJUbvATujKCvaREGWpGUionraA== + dependencies: + chalk "^4.0.0" + +log-symbols@^2.2.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/log-symbols/-/log-symbols-2.2.0.tgz#5740e1c5d6f0dfda4ad9323b5332107ef6b4c40a" + integrity sha512-VeIAFslyIerEJLXHziedo2basKbMKtTw3vfn5IzG0XTjhAVEJyNHnL2p7vc+wBDSdQuUpNw3M2u6xb9QsAY5Eg== + dependencies: + chalk "^2.0.1" + 
+log-symbols@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/log-symbols/-/log-symbols-3.0.0.tgz#f3a08516a5dea893336a7dee14d18a1cfdab77c4" + integrity sha512-dSkNGuI7iG3mfvDzUuYZyvk5dD9ocYCYzNU6CYDE6+Xqd+gwme6Z00NS3dUh8mq/73HaEtT7m6W+yUPtU6BZnQ== + dependencies: + chalk "^2.4.2" + +loglevel@^1.6.7: + version "1.7.1" + resolved "https://registry.yarnpkg.com/loglevel/-/loglevel-1.7.1.tgz#005fde2f5e6e47068f935ff28573e125ef72f197" + integrity sha512-Hesni4s5UkWkwCGJMQGAh71PaLUmKFM60dHvq0zi/vDhhrzuk+4GgNbTXJ12YYQJn6ZKBDNIjYcuQGKudvqrIw== + +long@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/long/-/long-4.0.0.tgz#9a7b71cfb7d361a194ea555241c92f7468d5bf28" + integrity sha512-XsP+KhQif4bjX1kbuSiySJFNAehNxgLb6hPRGJ9QsUr8ajHkuXGdrHmFUTUUXhDwVX2R5bY4JNZEwbUiMhV+MA== + +looper@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/looper/-/looper-3.0.0.tgz#2efa54c3b1cbaba9b94aee2e5914b0be57fbb749" + integrity sha512-LJ9wplN/uSn72oJRsXTx+snxPet5c8XiZmOKCm906NVYu+ag6SB6vUcnJcWxgnl2NfbIyeobAn7Bwv6xRj2XJg== + +loose-envify@^1.0.0, loose-envify@^1.1.0, loose-envify@^1.4.0: + version "1.4.0" + resolved "https://registry.yarnpkg.com/loose-envify/-/loose-envify-1.4.0.tgz#71ee51fa7be4caec1a63839f7e682d8132d30caf" + integrity sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q== + dependencies: + js-tokens "^3.0.0 || ^4.0.0" + +lower-case-first@^1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/lower-case-first/-/lower-case-first-1.0.2.tgz#e5da7c26f29a7073be02d52bac9980e5922adfa1" + integrity sha1-5dp8JvKacHO+AtUrrJmA5ZIq36E= + dependencies: + lower-case "^1.1.2" + +lower-case@^1.1.0, lower-case@^1.1.1, lower-case@^1.1.2: + version "1.1.4" + resolved "https://registry.yarnpkg.com/lower-case/-/lower-case-1.1.4.tgz#9a2cabd1b9e8e0ae993a4bf7d5875c39c42e8eac" + integrity sha1-miyr0bno4K6ZOkv31YdcOcQujqw= + +lower-case@^2.0.2: + version "2.0.2" + resolved "https://registry.yarnpkg.com/lower-case/-/lower-case-2.0.2.tgz#6fa237c63dbdc4a82ca0fd882e4722dc5e634e28" + integrity sha512-7fm3l3NAF9WfN6W3JOmf5drwpVqX78JtoGJ3A6W0a6ZnldM41w2fV5D490psKFTpMds8TJse/eHLFFsNHHjHgg== + dependencies: + tslib "^2.0.3" + +lowercase-keys@^1.0.0, lowercase-keys@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/lowercase-keys/-/lowercase-keys-1.0.1.tgz#6f9e30b47084d971a7c820ff15a6c5167b74c26f" + integrity sha512-G2Lj61tXDnVFFOi8VZds+SoQjtQC3dgokKdDG2mTm1tx4m50NUHBOZSBwQQHyy0V12A0JTG4icfZQH+xPyh8VA== + +lowercase-keys@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/lowercase-keys/-/lowercase-keys-2.0.0.tgz#2603e78b7b4b0006cbca2fbcc8a3202558ac9479" + integrity sha512-tqNXrS78oMOE73NMxK4EMLQsQowWf8jKooH9g7xPavRT706R6bkQJ6DY2Te7QukaZsulxa30wQ7bk0pm4XiHmA== + +lru-cache@^5.1.1: + version "5.1.1" + resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-5.1.1.tgz#1da27e6710271947695daf6848e847f01d84b920" + integrity sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w== + dependencies: + yallist "^3.0.2" + +lru-cache@^6.0.0: + version "6.0.0" + resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-6.0.0.tgz#6d6fe6570ebd96aaf90fcad1dafa3b2566db3a94" + integrity sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA== + dependencies: + yallist "^4.0.0" + +ltgt@2.2.1, ltgt@^2.1.2, ltgt@~2.2.0: + version "2.2.1" + resolved "https://registry.yarnpkg.com/ltgt/-/ltgt-2.2.1.tgz#f35ca91c493f7b73da0e07495304f17b31f87ee5" + 
integrity sha1-81ypHEk/e3PaDgdJUwTxezH4fuU= + +mafmt@^6.0.2: + version "6.0.10" + resolved "https://registry.yarnpkg.com/mafmt/-/mafmt-6.0.10.tgz#3ad251c78f14f8164e66f70fd3265662da41113a" + integrity sha512-FjHDnew6dW9lUu3eYwP0FvvJl9uvNbqfoJM+c1WJcSyutNEIlyu6v3f/rlPnD1cnmue38IjuHlhBdIh3btAiyw== + dependencies: + multiaddr "^6.1.0" + +mafmt@^7.0.0: + version "7.1.0" + resolved "https://registry.yarnpkg.com/mafmt/-/mafmt-7.1.0.tgz#4126f6d0eded070ace7dbbb6fb04977412d380b5" + integrity sha512-vpeo9S+hepT3k2h5iFxzEHvvR0GPBx9uKaErmnRzYNcaKb03DgOArjEMlgG4a9LcuZZ89a3I8xbeto487n26eA== + dependencies: + multiaddr "^7.3.0" + +make-dir@^1.0.0: + version "1.3.0" + resolved "https://registry.yarnpkg.com/make-dir/-/make-dir-1.3.0.tgz#79c1033b80515bd6d24ec9933e860ca75ee27f0c" + integrity sha512-2w31R7SJtieJJnQtGc7RVL2StM2vGYVfqUOvUDxH6bC6aJTxPxTF0GnIgCyu7tjockiUWAYQRbxa7vKn34s5sQ== + dependencies: + pify "^3.0.0" + +map-stream@0.0.6: + version "0.0.6" + resolved "https://registry.yarnpkg.com/map-stream/-/map-stream-0.0.6.tgz#d2ef4eb811a28644c7a8989985c69c2fdd496827" + integrity sha1-0u9OuBGihkTHqJiZhcacL91JaCc= + +marked@0.3.19: + version "0.3.19" + resolved "https://registry.yarnpkg.com/marked/-/marked-0.3.19.tgz#5d47f709c4c9fc3c216b6d46127280f40b39d790" + integrity sha512-ea2eGWOqNxPcXv8dyERdSr/6FmzvWwzjMxpfGB/sbMccXoct+xY+YukPD+QTUZwyvK7BZwcr4m21WBOW41pAkg== + +math-random@^1.0.1: + version "1.0.4" + resolved "https://registry.yarnpkg.com/math-random/-/math-random-1.0.4.tgz#5dd6943c938548267016d4e34f057583080c514c" + integrity sha512-rUxjysqif/BZQH2yhd5Aaq7vXMSx9NdEsQcyA07uEzIvxgI7zIr33gGsh+RU0/XjmQpCW7RsVof1vlkvQVCK5A== + +md5.js@^1.3.4: + version "1.3.5" + resolved "https://registry.yarnpkg.com/md5.js/-/md5.js-1.3.5.tgz#b5d07b8e3216e3e27cd728d72f70d1e6a342005f" + integrity sha512-xitP+WxNPcTTOgnTJcrhM0xvdPepipPSf3I8EIpGKeFLjt3PlJLIDG3u8EX53ZIubkb+5U2+3rELYpEhHhzdkg== + dependencies: + hash-base "^3.0.0" + inherits "^2.0.1" + safe-buffer "^5.1.2" + +media-typer@0.3.0: + version "0.3.0" + resolved "https://registry.yarnpkg.com/media-typer/-/media-typer-0.3.0.tgz#8710d7af0aa626f8fffa1ce00168545263255748" + integrity sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ== + +memdown@1.4.1, memdown@^1.0.0: + version "1.4.1" + resolved "https://registry.yarnpkg.com/memdown/-/memdown-1.4.1.tgz#b4e4e192174664ffbae41361aa500f3119efe215" + integrity sha1-tOThkhdGZP+65BNhqlAPMRnv4hU= + dependencies: + abstract-leveldown "~2.7.1" + functional-red-black-tree "^1.0.1" + immediate "^3.2.3" + inherits "~2.0.1" + ltgt "~2.2.0" + safe-buffer "~5.1.1" + +memorystream@^0.3.1: + version "0.3.1" + resolved "https://registry.yarnpkg.com/memorystream/-/memorystream-0.3.1.tgz#86d7090b30ce455d63fbae12dda51a47ddcaf9b2" + integrity sha1-htcJCzDORV1j+64S3aUaR93K+bI= + +merge-descriptors@1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/merge-descriptors/-/merge-descriptors-1.0.1.tgz#b00aaa556dd8b44568150ec9d1b953f3f90cbb61" + integrity sha512-cCi6g3/Zr1iqQi6ySbseM1Xvooa98N0w31jzUYrXPX2xqObmFGHJ0tQ5u74H3mVh7wLouTseZyYIq39g8cNp1w== + +merge-stream@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/merge-stream/-/merge-stream-1.0.1.tgz#4041202d508a342ba00174008df0c251b8c135e1" + integrity sha1-QEEgLVCKNCugAXQAjfDCUbjBNeE= + dependencies: + readable-stream "^2.0.1" + +merge-stream@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/merge-stream/-/merge-stream-2.0.0.tgz#52823629a14dd00c9770fb6ad47dc6310f2c1f60" + integrity 
sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w== + +merge2@^1.3.0: + version "1.4.1" + resolved "https://registry.yarnpkg.com/merge2/-/merge2-1.4.1.tgz#4368892f885e907455a6fd7dc55c0c9d404990ae" + integrity sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg== + +merkle-patricia-tree@^2.1.2, merkle-patricia-tree@^2.3.2: + version "2.3.2" + resolved "https://registry.yarnpkg.com/merkle-patricia-tree/-/merkle-patricia-tree-2.3.2.tgz#982ca1b5a0fde00eed2f6aeed1f9152860b8208a" + integrity sha512-81PW5m8oz/pz3GvsAwbauj7Y00rqm81Tzad77tHBwU7pIAtN+TJnMSOJhxBKflSVYhptMMb9RskhqHqrSm1V+g== + dependencies: + async "^1.4.2" + ethereumjs-util "^5.0.0" + level-ws "0.0.0" + levelup "^1.2.1" + memdown "^1.0.0" + readable-stream "^2.0.0" + rlp "^2.0.0" + semaphore ">=1.0.1" + +methods@~1.1.2: + version "1.1.2" + resolved "https://registry.yarnpkg.com/methods/-/methods-1.1.2.tgz#5529a4d67654134edcc5266656835b0f851afcee" + integrity sha512-iclAHeNqNm68zFtnZ0e+1L2yUIdvzNoauKU4WBA3VvH/vPFieF7qfRlwUZU+DA9P9bPXIS90ulxoUoCH23sV2w== + +micromatch@^2.3.7: + version "2.3.11" + resolved "https://registry.yarnpkg.com/micromatch/-/micromatch-2.3.11.tgz#86677c97d1720b363431d04d0d15293bd38c1565" + integrity sha1-hmd8l9FyCzY0MdBNDRUpO9OMFWU= + dependencies: + arr-diff "^2.0.0" + array-unique "^0.2.1" + braces "^1.8.2" + expand-brackets "^0.1.4" + extglob "^0.3.1" + filename-regex "^2.0.0" + is-extglob "^1.0.0" + is-glob "^2.0.1" + kind-of "^3.0.2" + normalize-path "^2.0.1" + object.omit "^2.0.0" + parse-glob "^3.0.4" + regex-cache "^0.4.2" + +micromatch@^4.0.2: + version "4.0.2" + resolved "https://registry.yarnpkg.com/micromatch/-/micromatch-4.0.2.tgz#4fcb0999bf9fbc2fcbdd212f6d629b9a56c39259" + integrity sha512-y7FpHSbMUMoyPbYUSzO6PaZ6FyRnQOpHuKwbo1G+Knck95XVU4QAiKdGEnj5wwoS7PlOgthX/09u5iFJ+aYf5Q== + dependencies: + braces "^3.0.1" + picomatch "^2.0.5" + +miller-rabin@^4.0.0: + version "4.0.1" + resolved "https://registry.yarnpkg.com/miller-rabin/-/miller-rabin-4.0.1.tgz#f080351c865b0dc562a8462966daa53543c78a4d" + integrity sha512-115fLhvZVqWwHPbClyntxEVfVDfl9DLLTuJvq3g2O/Oxi8AiNouAHvDSzHS0viUJc+V5vm3eq91Xwqn9dp4jRA== + dependencies: + bn.js "^4.0.0" + brorand "^1.0.1" + +mime-db@1.46.0: + version "1.46.0" + resolved "https://registry.yarnpkg.com/mime-db/-/mime-db-1.46.0.tgz#6267748a7f799594de3cbc8cde91def349661cee" + integrity sha512-svXaP8UQRZ5K7or+ZmfNhg2xX3yKDMUzqadsSqi4NCH/KomcH75MAMYAGVlvXn4+b/xOPhS3I2uHKRUzvjY7BQ== + +mime-db@1.52.0: + version "1.52.0" + resolved "https://registry.yarnpkg.com/mime-db/-/mime-db-1.52.0.tgz#bbabcdc02859f4987301c856e3387ce5ec43bf70" + integrity sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg== + +mime-types@^2.1.12, mime-types@~2.1.19, mime-types@~2.1.24, mime-types@~2.1.34: + version "2.1.35" + resolved "https://registry.yarnpkg.com/mime-types/-/mime-types-2.1.35.tgz#381a871b62a734450660ae3deee44813f70d959a" + integrity sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw== + dependencies: + mime-db "1.52.0" + +mime-types@^2.1.16: + version "2.1.29" + resolved "https://registry.yarnpkg.com/mime-types/-/mime-types-2.1.29.tgz#1d4ab77da64b91f5f72489df29236563754bb1b2" + integrity sha512-Y/jMt/S5sR9OaqteJtslsFZKWOIIqMACsJSiHghlCAyhf7jfVYjKBmLiX8OgpWeW+fjJ2b+Az69aPFPkUOY6xQ== + dependencies: + mime-db "1.46.0" + +mime@1.6.0: + version "1.6.0" + resolved 
"https://registry.yarnpkg.com/mime/-/mime-1.6.0.tgz#32cd9e5c64553bd58d19a568af452acff04981b1" + integrity sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg== + +mimic-fn@^1.0.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/mimic-fn/-/mimic-fn-1.2.0.tgz#820c86a39334640e99516928bd03fca88057d022" + integrity sha512-jf84uxzwiuiIVKiOLpfYk7N46TSy8ubTonmneY9vrpHNAnp0QBt2BxWV9dO3/j+BoVAb+a5G6YDPW3M5HOdMWQ== + +mimic-fn@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/mimic-fn/-/mimic-fn-2.1.0.tgz#7ed2c2ccccaf84d3ffcb7a69b57711fc2083401b" + integrity sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg== + +mimic-response@^1.0.0, mimic-response@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/mimic-response/-/mimic-response-1.0.1.tgz#4923538878eef42063cb8a3e3b0798781487ab1b" + integrity sha512-j5EctnkH7amfV/q5Hgmoal1g2QHFJRraOtmx0JpIqkxhBhI/lJSl1nMpQ45hVarwNETOoWEimndZ4QK0RHxuxQ== + +min-document@^2.19.0: + version "2.19.0" + resolved "https://registry.yarnpkg.com/min-document/-/min-document-2.19.0.tgz#7bd282e3f5842ed295bb748cdd9f1ffa2c824685" + integrity sha1-e9KC4/WELtKVu3SM3Z8f+iyCRoU= + dependencies: + dom-walk "^0.1.0" + +min-indent@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/min-indent/-/min-indent-1.0.1.tgz#a63f681673b30571fbe8bc25686ae746eefa9869" + integrity sha512-I9jwMn07Sy/IwOj3zVkVik2JTvgpaykDZEigL6Rx6N9LbMywwUSMtxET+7lVoDLLd3O3IXwJwvuuns8UB/HeAg== + +minimalistic-assert@^1.0.0, minimalistic-assert@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/minimalistic-assert/-/minimalistic-assert-1.0.1.tgz#2e194de044626d4a10e7f7fbc00ce73e83e4d5c7" + integrity sha512-UtJcAD4yEaGtjPezWuO9wC4nwUnVH/8/Im3yEHQP4b67cXlD/Qr9hdITCU1xDbSEXg2XKNaP8jsReV7vQd00/A== + +minimalistic-crypto-utils@^1.0.0, minimalistic-crypto-utils@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/minimalistic-crypto-utils/-/minimalistic-crypto-utils-1.0.1.tgz#f6c00c1c0b082246e5c4d99dfb8c7c083b2b582a" + integrity sha1-9sAMHAsIIkblxNmd+4x8CDsrWCo= + +"minimatch@2 || 3", minimatch@3.0.4: + version "3.0.4" + resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-3.0.4.tgz#5166e286457f03306064be5497e8dbb0c3d32083" + integrity sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA== + dependencies: + brace-expansion "^1.1.7" + +minimatch@^3.0.2, minimatch@^3.0.4, minimatch@^3.1.1: + version "3.1.2" + resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-3.1.2.tgz#19cd194bfd3e428f049a70817c038d89ab4be35b" + integrity sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw== + dependencies: + brace-expansion "^1.1.7" + +minimist@0.0.8: + version "0.0.8" + resolved "https://registry.yarnpkg.com/minimist/-/minimist-0.0.8.tgz#857fcabfc3397d2625b8228262e86aa7a011b05d" + integrity sha1-hX/Kv8M5fSYluCKCYuhqp6ARsF0= + +minimist@1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/minimist/-/minimist-1.2.0.tgz#a35008b20f41383eec1fb914f4cd5df79a264284" + integrity sha1-o1AIsg9BOD7sH7kU9M1d95omQoQ= + +minimist@^1.2.0, minimist@^1.2.5, minimist@^1.2.6: + version "1.2.6" + resolved "https://registry.yarnpkg.com/minimist/-/minimist-1.2.6.tgz#8637a5b759ea0d6e98702cfb3a9283323c93af44" + integrity sha512-Jsjnk4bw3YJqYzbdyBiNsPWHPfO++UGG749Cxs6peCu5Xg4nrena6OVxOYxrQTqww0Jmwt+Ref8rggumkTLz9Q== + +minipass@^2.6.0, minipass@^2.9.0: + version "2.9.0" + resolved 
"https://registry.yarnpkg.com/minipass/-/minipass-2.9.0.tgz#e713762e7d3e32fed803115cf93e04bca9fcc9a6" + integrity sha512-wxfUjg9WebH+CUDX/CdbRlh5SmfZiy/hpkxaRI16Y9W56Pa75sWgd/rvFilSgrauD9NyFymP/+JFV3KwzIsJeg== + dependencies: + safe-buffer "^5.1.2" + yallist "^3.0.0" + +minipass@^3.0.0: + version "3.3.4" + resolved "https://registry.yarnpkg.com/minipass/-/minipass-3.3.4.tgz#ca99f95dd77c43c7a76bf51e6d200025eee0ffae" + integrity sha512-I9WPbWHCGu8W+6k1ZiGpPu0GkoKBeorkfKNuAFBNS1HNFJvke82sxvI5bzcCNpWPorkOO5QQ+zomzzwRxejXiw== + dependencies: + yallist "^4.0.0" + +minizlib@^1.3.3: + version "1.3.3" + resolved "https://registry.yarnpkg.com/minizlib/-/minizlib-1.3.3.tgz#2290de96818a34c29551c8a8d301216bd65a861d" + integrity sha512-6ZYMOEnmVsdCeTJVE0W9ZD+pVnE8h9Hma/iOwwRDsdQoePpoX56/8B6z3P9VNwppJuBKNRuFDRNRqRWexT9G9Q== + dependencies: + minipass "^2.9.0" + +minizlib@^2.1.1: + version "2.1.2" + resolved "https://registry.yarnpkg.com/minizlib/-/minizlib-2.1.2.tgz#e90d3466ba209b932451508a11ce3d3632145931" + integrity sha512-bAxsR8BVfj60DWXHE3u30oHzfl4G7khkSuPW+qvpd7jFRHm7dLxOjUk1EHACJ/hxLY8phGJ0YhYHZo7jil7Qdg== + dependencies: + minipass "^3.0.0" + yallist "^4.0.0" + +mkdirp-promise@^5.0.1: + version "5.0.1" + resolved "https://registry.yarnpkg.com/mkdirp-promise/-/mkdirp-promise-5.0.1.tgz#e9b8f68e552c68a9c1713b84883f7a1dd039b8a1" + integrity sha1-6bj2jlUsaKnBcTuEiD96HdA5uKE= + dependencies: + mkdirp "*" + +mkdirp@*, mkdirp@^1.0.3: + version "1.0.4" + resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-1.0.4.tgz#3eb5ed62622756d79a5f0e2a221dfebad75c2f7e" + integrity sha512-vVqVZQyf3WLx2Shd0qJ9xuvqgAyKPLAiqITEtqW0oIUjzo3PePDd6fW9iFz30ef7Ysp/oiWqbhszeGWW2T6Gzw== + +mkdirp@0.5.1: + version "0.5.1" + resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-0.5.1.tgz#30057438eac6cf7f8c4767f38648d6697d75c903" + integrity sha1-MAV0OOrGz3+MR2fzhkjWaX11yQM= + dependencies: + minimist "0.0.8" + +mkdirp@^0.5.0, mkdirp@^0.5.5: + version "0.5.5" + resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-0.5.5.tgz#d91cefd62d1436ca0f41620e251288d420099def" + integrity sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ== + dependencies: + minimist "^1.2.5" + +mkdirp@^0.5.1: + version "0.5.6" + resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-0.5.6.tgz#7def03d2432dcae4ba1d611445c48396062255f6" + integrity sha512-FP+p8RB8OWpF3YZBCrP5gtADmtXApB5AMLn+vdyA+PyxCjrCs00mjyUozssO33cwDeT3wNGdLxJ5M//YqtHAJw== + dependencies: + minimist "^1.2.6" + +mocha@8.1.2: + version "8.1.2" + resolved "https://registry.yarnpkg.com/mocha/-/mocha-8.1.2.tgz#d67fad13300e4f5cd48135a935ea566f96caf827" + integrity sha512-I8FRAcuACNMLQn3lS4qeWLxXqLvGf6r2CaLstDpZmMUUSmvW6Cnm1AuHxgbc7ctZVRcfwspCRbDHymPsi3dkJw== + dependencies: + ansi-colors "4.1.1" + browser-stdout "1.3.1" + chokidar "3.4.2" + debug "4.1.1" + diff "4.0.2" + escape-string-regexp "4.0.0" + find-up "5.0.0" + glob "7.1.6" + growl "1.10.5" + he "1.2.0" + js-yaml "3.14.0" + log-symbols "4.0.0" + minimatch "3.0.4" + ms "2.1.2" + object.assign "4.1.0" + promise.allsettled "1.0.2" + serialize-javascript "4.0.0" + strip-json-comments "3.0.1" + supports-color "7.1.0" + which "2.0.2" + wide-align "1.1.3" + workerpool "6.0.0" + yargs "13.3.2" + yargs-parser "13.1.2" + yargs-unparser "1.6.1" + +mock-fs@^4.1.0: + version "4.13.0" + resolved "https://registry.yarnpkg.com/mock-fs/-/mock-fs-4.13.0.tgz#31c02263673ec3789f90eb7b6963676aa407a598" + integrity sha512-DD0vOdofJdoaRNtnWcrXe6RQbpHkPPmtqGq14uRX0F8ZKJ5nv89CVTYl/BZdppDxBDaV0hl75htg3abpEWlPZA== + 
+module@^1.2.5: + version "1.2.5" + resolved "https://registry.yarnpkg.com/module/-/module-1.2.5.tgz#b503eb06cdc13473f56818426974cde7ec59bf15" + integrity sha1-tQPrBs3BNHP1aBhCaXTN5+xZvxU= + dependencies: + chalk "1.1.3" + concat-stream "1.5.1" + lodash.template "4.2.4" + map-stream "0.0.6" + tildify "1.2.0" + vinyl-fs "2.4.3" + yargs "4.6.0" + +ms@2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/ms/-/ms-2.0.0.tgz#5608aeadfc00be6c2901df5f9861788de0d597c8" + integrity sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A== + +ms@2.1.2: + version "2.1.2" + resolved "https://registry.yarnpkg.com/ms/-/ms-2.1.2.tgz#d09d1f357b443f493382a8eb3ccd183872ae6009" + integrity sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w== + +ms@2.1.3, ms@^2.1.1: + version "2.1.3" + resolved "https://registry.yarnpkg.com/ms/-/ms-2.1.3.tgz#574c8138ce1d2b5861f0b44579dbadd60c6615b2" + integrity sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA== + +multiaddr@^6.0.3, multiaddr@^6.0.6, multiaddr@^6.1.0: + version "6.1.1" + resolved "https://registry.yarnpkg.com/multiaddr/-/multiaddr-6.1.1.tgz#9aae57b3e399089b9896d9455afa8f6b117dff06" + integrity sha512-Q1Ika0F9MNhMtCs62Ue+GWIJtRFEhZ3Xz8wH7/MZDVZTWhil1/H2bEGN02kUees3hkI3q1oHSjmXYDM0gxaFjQ== + dependencies: + bs58 "^4.0.1" + class-is "^1.1.0" + hi-base32 "~0.5.0" + ip "^1.1.5" + is-ip "^2.0.0" + varint "^5.0.0" + +multiaddr@^7.2.1, multiaddr@^7.3.0: + version "7.5.0" + resolved "https://registry.yarnpkg.com/multiaddr/-/multiaddr-7.5.0.tgz#976c88e256e512263445ab03b3b68c003d5f485e" + integrity sha512-GvhHsIGDULh06jyb6ev+VfREH9evJCFIRnh3jUt9iEZ6XDbyoisZRFEI9bMvK/AiR6y66y6P+eoBw9mBYMhMvw== + dependencies: + buffer "^5.5.0" + cids "~0.8.0" + class-is "^1.1.0" + is-ip "^3.1.0" + multibase "^0.7.0" + varint "^5.0.0" + +multibase@^0.7.0: + version "0.7.0" + resolved "https://registry.yarnpkg.com/multibase/-/multibase-0.7.0.tgz#1adfc1c50abe05eefeb5091ac0c2728d6b84581b" + integrity sha512-TW8q03O0f6PNFTQDvh3xxH03c8CjGaaYrjkl9UQPG6rz53TQzzxJVCIWVjzcbN/Q5Y53Zd0IBQBMVktVgNx4Fg== + dependencies: + base-x "^3.0.8" + buffer "^5.5.0" + +multibase@^1.0.0, multibase@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/multibase/-/multibase-1.0.1.tgz#4adbe1de0be8a1ab0274328b653c3f1903476724" + integrity sha512-KcCxpBVY8fdVKu4dJMAahq4F/2Z/9xqEjIiR7PiMe7LRGeorFn2NLmicN6nLBCqQvft6MG2Lc9X5P0IdyvnxEw== + dependencies: + base-x "^3.0.8" + buffer "^5.5.0" + +multibase@~0.6.0: + version "0.6.1" + resolved "https://registry.yarnpkg.com/multibase/-/multibase-0.6.1.tgz#b76df6298536cc17b9f6a6db53ec88f85f8cc12b" + integrity sha512-pFfAwyTjbbQgNc3G7D48JkJxWtoJoBMaR4xQUOuB8RnCgRqaYmWNFeJTTvrJ2w51bjLq2zTby6Rqj9TQ9elSUw== + dependencies: + base-x "^3.0.8" + buffer "^5.5.0" + +multicodec@^0.5.5, multicodec@~0.5.1: + version "0.5.7" + resolved "https://registry.yarnpkg.com/multicodec/-/multicodec-0.5.7.tgz#1fb3f9dd866a10a55d226e194abba2dcc1ee9ffd" + integrity sha512-PscoRxm3f+88fAtELwUnZxGDkduE2HD9Q6GHUOywQLjOGT/HAdhjLDYNZ1e7VR0s0TP0EwZ16LNUTFpoBGivOA== + dependencies: + varint "^5.0.0" + +multicodec@^1.0.0, multicodec@^1.0.1: + version "1.0.4" + resolved "https://registry.yarnpkg.com/multicodec/-/multicodec-1.0.4.tgz#46ac064657c40380c28367c90304d8ed175a714f" + integrity sha512-NDd7FeS3QamVtbgfvu5h7fd1IlbaC4EQ0/pgU4zqE2vdHCmBGsUa0TiM8/TdSeG6BMPC92OOCf8F1ocE/Wkrrg== + dependencies: + buffer "^5.6.0" + varint "^5.0.0" + +multihashes@^0.4.15, 
multihashes@~0.4.13, multihashes@~0.4.14, multihashes@~0.4.15: + version "0.4.21" + resolved "https://registry.yarnpkg.com/multihashes/-/multihashes-0.4.21.tgz#dc02d525579f334a7909ade8a122dabb58ccfcb5" + integrity sha512-uVSvmeCWf36pU2nB4/1kzYZjsXD9vofZKpgudqkceYY5g2aZZXJ5r9lxuzoRLl1OAp28XljXsEJ/X/85ZsKmKw== + dependencies: + buffer "^5.5.0" + multibase "^0.7.0" + varint "^5.0.0" + +multihashes@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/multihashes/-/multihashes-1.0.1.tgz#a89415d68283cf6287c6e219e304e75ce7fb73fe" + integrity sha512-S27Tepg4i8atNiFaU5ZOm3+gl3KQlUanLs/jWcBxQHFttgq+5x1OgbQmf2d8axJ/48zYGBd/wT9d723USMFduw== + dependencies: + buffer "^5.6.0" + multibase "^1.0.1" + varint "^5.0.0" + +multihashing-async@~0.5.1: + version "0.5.2" + resolved "https://registry.yarnpkg.com/multihashing-async/-/multihashing-async-0.5.2.tgz#4af40e0dde2f1dbb12a7c6b265181437ac26b9de" + integrity sha512-mmyG6M/FKxrpBh9xQDUvuJ7BbqT93ZeEeH5X6LeMYKoYshYLr9BDdCsvDtZvn+Egf+/Xi+aOznrWL4vp3s+p0Q== + dependencies: + blakejs "^1.1.0" + js-sha3 "~0.8.0" + multihashes "~0.4.13" + murmurhash3js "^3.0.1" + nodeify "^1.0.1" + +multihashing-async@~0.6.0: + version "0.6.0" + resolved "https://registry.yarnpkg.com/multihashing-async/-/multihashing-async-0.6.0.tgz#c1fc6696a624b9bf39b160b0c4c4e7ba3f394453" + integrity sha512-Qv8pgg99Lewc191A5nlXy0bSd2amfqlafNJZmarU6Sj7MZVjpR94SCxQjf4DwPtgWZkiLqsjUQBXA2RSq+hYyA== + dependencies: + blakejs "^1.1.0" + js-sha3 "~0.8.0" + multihashes "~0.4.13" + murmurhash3js "^3.0.1" + nodeify "^1.0.1" + +multihashing-async@~0.7.0: + version "0.7.0" + resolved "https://registry.yarnpkg.com/multihashing-async/-/multihashing-async-0.7.0.tgz#3234fb98295be84386b85bfd20377d3e5be20d6b" + integrity sha512-SCbfl3f+DzJh+/5piukga9ofIOxwfT05t8R4jfzZIJ88YE9zU9+l3K2X+XB19MYyxqvyK9UJRNWbmQpZqQlbRA== + dependencies: + blakejs "^1.1.0" + buffer "^5.2.1" + err-code "^1.1.2" + js-sha3 "~0.8.0" + multihashes "~0.4.13" + murmurhash3js-revisited "^3.0.0" + +multihashing-async@~0.8.0: + version "0.8.2" + resolved "https://registry.yarnpkg.com/multihashing-async/-/multihashing-async-0.8.2.tgz#3d5da05df27d83be923f6d04143a0954ff87f27f" + integrity sha512-2lKa1autuCy8x7KIEj9aVNbAb3aIMRFYIwN7mq/zD4pxgNIVgGlm+f6GKY4880EOF2Y3GktHYssRy7TAJQ2DyQ== + dependencies: + blakejs "^1.1.0" + buffer "^5.4.3" + err-code "^2.0.0" + js-sha3 "^0.8.0" + multihashes "^1.0.1" + murmurhash3js-revisited "^3.0.0" + +murmurhash3js-revisited@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/murmurhash3js-revisited/-/murmurhash3js-revisited-3.0.0.tgz#6bd36e25de8f73394222adc6e41fa3fac08a5869" + integrity sha512-/sF3ee6zvScXMb1XFJ8gDsSnY+X8PbOyjIuBhtgis10W2Jx4ZjIhikUCIF9c4gpJxVnQIsPAFrSwTCuAjicP6g== + +murmurhash3js@^3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/murmurhash3js/-/murmurhash3js-3.0.1.tgz#3e983e5b47c2a06f43a713174e7e435ca044b998" + integrity sha512-KL8QYUaxq7kUbcl0Yto51rMcYt7E/4N4BG3/c96Iqw1PQrTRspu8Cpx4TZ4Nunib1d4bEkIH3gjCYlP2RLBdow== + +mute-stream@0.0.8: + version "0.0.8" + resolved "https://registry.yarnpkg.com/mute-stream/-/mute-stream-0.0.8.tgz#1630c42b2251ff81e2a283de96a5497ea92e5e0d" + integrity sha512-nnbWWOkoWyUsTjKrhgD0dcz22mdkSnpYqbEjIm2nhwhuxlSkpywJmBo8h0ZqJdkp73mb90SssHkN4rsRaBAfAA== + +nan@^2.12.1: + version "2.14.2" + resolved "https://registry.yarnpkg.com/nan/-/nan-2.14.2.tgz#f5376400695168f4cc694ac9393d0c9585eeea19" + integrity sha512-M2ufzIiINKCuDfBSAUr1vWQ+vuVcA9kqx8JJUsbQi6yf1uGRyb7HfpdfUr5qLXf3B/t8dPvcjhKMmlfnP47EzQ== + +nan@^2.14.0, nan@^2.14.2: + 
version "2.16.0" + resolved "https://registry.yarnpkg.com/nan/-/nan-2.16.0.tgz#664f43e45460fb98faf00edca0bb0d7b8dce7916" + integrity sha512-UdAqHyFngu7TfQKsCBgAA6pWDkT8MAO7d0jyOecVhN5354xbLqdn8mV9Tat9gepAupm0bt2DbeaSC8vS52MuFA== + +nano-json-stream-parser@^0.1.2: + version "0.1.2" + resolved "https://registry.yarnpkg.com/nano-json-stream-parser/-/nano-json-stream-parser-0.1.2.tgz#0cc8f6d0e2b622b479c40d499c46d64b755c6f5f" + integrity sha1-DMj20OK2IrR5xA1JnEbWS3Vcb18= + +nanoid@^2.0.0: + version "2.1.11" + resolved "https://registry.yarnpkg.com/nanoid/-/nanoid-2.1.11.tgz#ec24b8a758d591561531b4176a01e3ab4f0f0280" + integrity sha512-s/snB+WGm6uwi0WjsZdaVcuf3KJXlfGl2LcxgwkEwJF0D/BWzVWAZW/XY4bFaiR7s0Jk3FPvlnepg1H1b1UwlA== + +napi-macros@~1.8.1: + version "1.8.2" + resolved "https://registry.yarnpkg.com/napi-macros/-/napi-macros-1.8.2.tgz#299265c1d8aa401351ad0675107d751228c03eda" + integrity sha512-Tr0DNY4RzTaBG2W2m3l7ZtFuJChTH6VZhXVhkGGjF/4cZTt+i8GcM9ozD+30Lmr4mDoZ5Xx34t2o4GJqYWDGcg== + +napi-macros@~2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/napi-macros/-/napi-macros-2.0.0.tgz#2b6bae421e7b96eb687aa6c77a7858640670001b" + integrity sha512-A0xLykHtARfueITVDernsAWdtIMbOJgKgcluwENp3AlsKN/PloyO10HtmoqnFAQAcxPkgZN7wdfPfEd0zNGxbg== + +"ndjson@github:hugomrdias/ndjson#feat/readable-stream3": + version "1.5.0" + resolved "https://codeload.github.com/hugomrdias/ndjson/tar.gz/4db16da6b42e5b39bf300c3a7cde62abb3fa3a11" + dependencies: + json-stringify-safe "^5.0.1" + minimist "^1.2.0" + split2 "^3.1.0" + through2 "^3.0.0" + +needle@^2.2.1: + version "2.6.0" + resolved "https://registry.yarnpkg.com/needle/-/needle-2.6.0.tgz#24dbb55f2509e2324b4a99d61f413982013ccdbe" + integrity sha512-KKYdza4heMsEfSWD7VPUIz3zX2XDwOyX2d+geb4vrERZMT5RMU6ujjaD+I5Yr54uZxQ2w6XRTAhHBbSCyovZBg== + dependencies: + debug "^3.2.6" + iconv-lite "^0.4.4" + sax "^1.2.4" + +negotiator@0.6.3: + version "0.6.3" + resolved "https://registry.yarnpkg.com/negotiator/-/negotiator-0.6.3.tgz#58e323a72fedc0d6f9cd4d31fe49f51479590ccd" + integrity sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg== + +next-tick@~1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/next-tick/-/next-tick-1.0.0.tgz#ca86d1fe8828169b0120208e3dc8424b9db8342c" + integrity sha1-yobR/ogoFpsBICCOPchCS524NCw= + +no-case@^2.2.0, no-case@^2.3.2: + version "2.3.2" + resolved "https://registry.yarnpkg.com/no-case/-/no-case-2.3.2.tgz#60b813396be39b3f1288a4c1ed5d1e7d28b464ac" + integrity sha512-rmTZ9kz+f3rCvK2TD1Ue/oZlns7OGoIWP4fc3llxxRXlOkHKoWPPWJOfFYpITabSow43QJbRIoHQXtt10VldyQ== + dependencies: + lower-case "^1.1.1" + +no-case@^3.0.4: + version "3.0.4" + resolved "https://registry.yarnpkg.com/no-case/-/no-case-3.0.4.tgz#d361fd5c9800f558551a8369fc0dcd4662b6124d" + integrity sha512-fgAN3jGAh+RoxUGZHTSOLJIqUc2wmoBwGR4tbpNAKmmovFoWq0OdRkb0VkldReO2a2iBT/OEulG9XSUc10r3zg== + dependencies: + lower-case "^2.0.2" + tslib "^2.0.3" + +node-addon-api@^2.0.0: + version "2.0.2" + resolved "https://registry.yarnpkg.com/node-addon-api/-/node-addon-api-2.0.2.tgz#432cfa82962ce494b132e9d72a15b29f71ff5d32" + integrity sha512-Ntyt4AIXyaLIuMHF6IOoTakB3K+RWxwtsHNRxllEoA6vPwP9o4866g6YWDLUdnucilZhmkxiHwHr11gAENw+QA== + +node-fetch@1.7.3, node-fetch@~1.7.1: + version "1.7.3" + resolved "https://registry.yarnpkg.com/node-fetch/-/node-fetch-1.7.3.tgz#980f6f72d85211a5347c6b2bc18c5b84c3eb47ef" + integrity sha512-NhZ4CsKx7cYm2vSrBAr2PvFOe6sWDf0UYLRqA6svUYg7+/TSfVAu49jYC4BvQ4Sms9SZgdqGBgroqfDhJdTyKQ== + dependencies: + encoding 
"^0.1.11" + is-stream "^1.0.1" + +node-fetch@2.1.2: + version "2.1.2" + resolved "https://registry.yarnpkg.com/node-fetch/-/node-fetch-2.1.2.tgz#ab884e8e7e57e38a944753cec706f788d1768bb5" + integrity sha1-q4hOjn5X44qUR1POxwb3iNF2i7U= + +node-fetch@2.4.1: + version "2.4.1" + resolved "https://registry.yarnpkg.com/node-fetch/-/node-fetch-2.4.1.tgz#b2e38f1117b8acbedbe0524f041fb3177188255d" + integrity sha512-P9UbpFK87NyqBZzUuDBDz4f6Yiys8xm8j7ACDbi6usvFm6KItklQUKjeoqTrYS/S1k6I8oaOC2YLLDr/gg26Mw== + +node-fetch@2.6.0: + version "2.6.0" + resolved "https://registry.yarnpkg.com/node-fetch/-/node-fetch-2.6.0.tgz#e633456386d4aa55863f676a7ab0daa8fdecb0fd" + integrity sha512-8dG4H5ujfvFiqDmVu9fQ5bOHUC15JMjMY/Zumv26oOvvVJjM67KF8koCWIabKQ1GJIa9r2mMZscBq/TbdOcmNA== + +node-fetch@2.6.1: + version "2.6.1" + resolved "https://registry.yarnpkg.com/node-fetch/-/node-fetch-2.6.1.tgz#045bd323631f76ed2e2b55573394416b639a0052" + integrity sha512-V4aYg89jEoVRxRb2fJdAg8FHvI7cEyYdVAh94HH0UIK8oJxUfkjlDQN9RbMx+bEjP7+ggMiFRprSti032Oipxw== + +node-fetch@^2.3.0: + version "2.6.7" + resolved "https://registry.yarnpkg.com/node-fetch/-/node-fetch-2.6.7.tgz#24de9fba827e3b4ae44dc8b20256a379160052ad" + integrity sha512-ZjMPFEfVx5j+y2yF35Kzx5sF7kDzxuDj6ziH4FFbOp87zKDZNx8yExJIb05OGF4Nlt9IHFIMBkRl41VdvcNdbQ== + dependencies: + whatwg-url "^5.0.0" + +node-fetch@^2.6.1: + version "2.6.6" + resolved "https://registry.yarnpkg.com/node-fetch/-/node-fetch-2.6.6.tgz#1751a7c01834e8e1697758732e9efb6eeadfaf89" + integrity sha512-Z8/6vRlTUChSdIgMa51jxQ4lrw/Jy5SOW10ObaA47/RElsAN2c5Pn8bTgFGWn/ibwzXTE8qwr1Yzx28vsecXEA== + dependencies: + whatwg-url "^5.0.0" + +node-forge@^0.10.0: + version "0.10.0" + resolved "https://registry.yarnpkg.com/node-forge/-/node-forge-0.10.0.tgz#32dea2afb3e9926f02ee5ce8794902691a676bf3" + integrity sha512-PPmu8eEeG9saEUvI97fm4OYxXVB6bFvyNTyiUOBichBpFG8A1Ljw3bY62+5oOjDEMHRnd0Y7HQ+x7uzxOzC6JA== + +node-gyp-build@^4.2.0: + version "4.5.0" + resolved "https://registry.yarnpkg.com/node-gyp-build/-/node-gyp-build-4.5.0.tgz#7a64eefa0b21112f89f58379da128ac177f20e40" + integrity sha512-2iGbaQBV+ITgCz76ZEjmhUKAKVf7xfY1sRl4UiKQspfZMH2h06SyhNsnSVy50cwkFQDGLyif6m/6uFXHkOZ6rg== + +node-gyp-build@~3.8.0: + version "3.8.0" + resolved "https://registry.yarnpkg.com/node-gyp-build/-/node-gyp-build-3.8.0.tgz#0f57efeb1971f404dfcbfab975c284de7c70f14a" + integrity sha512-bYbpIHyRqZ7sVWXxGpz8QIRug5JZc/hzZH4GbdT9HTZi6WmKCZ8GLvP8OZ9TTiIBvwPFKgtGrlWQSXDAvYdsPw== + +node-gyp-build@~4.1.0: + version "4.1.1" + resolved "https://registry.yarnpkg.com/node-gyp-build/-/node-gyp-build-4.1.1.tgz#d7270b5d86717068d114cc57fff352f96d745feb" + integrity sha512-dSq1xmcPDKPZ2EED2S6zw/b9NKsqzXRE6dVr8TVQnI3FJOTteUMuqF3Qqs6LZg+mLGYJWqQzMbIjMtJqTv87nQ== + +node-int64@^0.4.0: + version "0.4.0" + resolved "https://registry.yarnpkg.com/node-int64/-/node-int64-0.4.0.tgz#87a9065cdb355d3182d8f94ce11188b825c68a3b" + integrity sha1-h6kGXNs1XTGC2PlM4RGIuCXGijs= + +node-interval-tree@^1.3.3: + version "1.3.3" + resolved "https://registry.yarnpkg.com/node-interval-tree/-/node-interval-tree-1.3.3.tgz#15ffb904cde08270214acace8dc7653e89ae32b7" + integrity sha512-K9vk96HdTK5fEipJwxSvIIqwTqr4e3HRJeJrNxBSeVMNSC/JWARRaX7etOLOuTmrRMeOI/K5TCJu3aWIwZiNTw== + dependencies: + shallowequal "^1.0.2" + +node-pre-gyp@^0.11.0: + version "0.11.0" + resolved "https://registry.yarnpkg.com/node-pre-gyp/-/node-pre-gyp-0.11.0.tgz#db1f33215272f692cd38f03238e3e9b47c5dd054" + integrity sha512-TwWAOZb0j7e9eGaf9esRx3ZcLaE5tQ2lvYy1pb5IAaG1a2e2Kv5Lms1Y4hpj+ciXJRofIxxlt5haeQ/2ANeE0Q== + dependencies: + 
detect-libc "^1.0.2" + mkdirp "^0.5.1" + needle "^2.2.1" + nopt "^4.0.1" + npm-packlist "^1.1.6" + npmlog "^4.0.2" + rc "^1.2.7" + rimraf "^2.6.1" + semver "^5.3.0" + tar "^4" + +node-releases@^1.1.70: + version "1.1.71" + resolved "https://registry.yarnpkg.com/node-releases/-/node-releases-1.1.71.tgz#cb1334b179896b1c89ecfdd4b725fb7bbdfc7dbb" + integrity sha512-zR6HoT6LrLCRBwukmrVbHv0EpEQjksO6GmFcZQQuCAy139BEsoVKPYnf3jongYW83fAa1torLGYwxxky/p28sg== + +nodeify@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/nodeify/-/nodeify-1.0.1.tgz#64ab69a7bdbaf03ce107b4f0335c87c0b9e91b1d" + integrity sha512-n7C2NyEze8GCo/z73KdbjRsBiLbv6eBn1FxwYKQ23IqGo7pQY3mhQan61Sv7eEDJCiyUjTVrVkXTzJCo1dW7Aw== + dependencies: + is-promise "~1.0.0" + promise "~1.3.0" + +nofilter@^1.0.4: + version "1.0.4" + resolved "https://registry.yarnpkg.com/nofilter/-/nofilter-1.0.4.tgz#78d6f4b6a613e7ced8b015cec534625f7667006e" + integrity sha512-N8lidFp+fCz+TD51+haYdbDGrcBWwuHX40F5+z0qkUjMJ5Tp+rdSuAkMJ9N9eoolDlEVTf6u5icM+cNKkKW2mA== + +noop-fn@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/noop-fn/-/noop-fn-1.0.0.tgz#5f33d47f13d2150df93e0cb036699e982f78ffbf" + integrity sha1-XzPUfxPSFQ35PgywNmmemC94/78= + +nopt@^4.0.1: + version "4.0.3" + resolved "https://registry.yarnpkg.com/nopt/-/nopt-4.0.3.tgz#a375cad9d02fd921278d954c2254d5aa57e15e48" + integrity sha512-CvaGwVMztSMJLOeXPrez7fyfObdZqNUK1cPAEzLHrTybIua9pMdmmPR5YwtfNftIOMv3DPUhFaxsZMNTQO20Kg== + dependencies: + abbrev "1" + osenv "^0.1.4" + +normalize-package-data@^2.3.2: + version "2.5.0" + resolved "https://registry.yarnpkg.com/normalize-package-data/-/normalize-package-data-2.5.0.tgz#e66db1838b200c1dfc233225d12cb36520e234a8" + integrity sha512-/5CMN3T0R4XTj4DcGaexo+roZSdSFW/0AOOTROrjxzCG1wrWXEsGbRKevjlIL+ZDE4sZlJr5ED4YW0yqmkK+eA== + dependencies: + hosted-git-info "^2.1.4" + resolve "^1.10.0" + semver "2 || 3 || 4 || 5" + validate-npm-package-license "^3.0.1" + +normalize-path@^2.0.1, normalize-path@^2.1.1: + version "2.1.1" + resolved "https://registry.yarnpkg.com/normalize-path/-/normalize-path-2.1.1.tgz#1ab28b556e198363a8c1a6f7e6fa20137fe6aed9" + integrity sha1-GrKLVW4Zg2Oowab35vogE3/mrtk= + dependencies: + remove-trailing-separator "^1.0.1" + +normalize-path@^3.0.0, normalize-path@~3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/normalize-path/-/normalize-path-3.0.0.tgz#0dcd69ff23a1c9b11fd0978316644a0388216a65" + integrity sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA== + +normalize-url@^4.1.0: + version "4.5.0" + resolved "https://registry.yarnpkg.com/normalize-url/-/normalize-url-4.5.0.tgz#453354087e6ca96957bd8f5baf753f5982142129" + integrity sha512-2s47yzUxdexf1OhyRi4Em83iQk0aPvwTddtFz4hnSSw9dCEsLEGf6SwIO8ss/19S9iBb5sJaOuTvTGDeZI00BQ== + +npm-bundled@^1.0.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/npm-bundled/-/npm-bundled-1.1.1.tgz#1edd570865a94cdb1bc8220775e29466c9fb234b" + integrity sha512-gqkfgGePhTpAEgUsGEgcq1rqPXA+tv/aVBlgEzfXwA1yiUJF7xtEt3CtVwOjNYQOVknDk0F20w58Fnm3EtG0fA== + dependencies: + npm-normalize-package-bin "^1.0.1" + +npm-normalize-package-bin@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/npm-normalize-package-bin/-/npm-normalize-package-bin-1.0.1.tgz#6e79a41f23fd235c0623218228da7d9c23b8f6e2" + integrity sha512-EPfafl6JL5/rU+ot6P3gRSCpPDW5VmIzX959Ob1+ySFUuuYHWHekXpwdUZcKP5C+DS4GEtdJluwBjnsNDl+fSA== + +npm-packlist@^1.1.6: + version "1.4.8" + resolved 
"https://registry.yarnpkg.com/npm-packlist/-/npm-packlist-1.4.8.tgz#56ee6cc135b9f98ad3d51c1c95da22bbb9b2ef3e" + integrity sha512-5+AZgwru5IevF5ZdnFglB5wNlHG1AOOuw28WhUq8/8emhBmLv6jX5by4WJCh7lW0uSYZYS6DXqIsyZVIXRZU9A== + dependencies: + ignore-walk "^3.0.1" + npm-bundled "^1.0.1" + npm-normalize-package-bin "^1.0.1" + +npm-run-path@^4.0.0: + version "4.0.1" + resolved "https://registry.yarnpkg.com/npm-run-path/-/npm-run-path-4.0.1.tgz#b7ecd1e5ed53da8e37a55e1c2269e0b97ed748ea" + integrity sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw== + dependencies: + path-key "^3.0.0" + +npmlog@^4.0.2: + version "4.1.2" + resolved "https://registry.yarnpkg.com/npmlog/-/npmlog-4.1.2.tgz#08a7f2a8bf734604779a9efa4ad5cc717abb954b" + integrity sha512-2uUqazuKlTaSI/dC8AzicUck7+IrEaOnN/e0jd3Xtt1KcGpwx30v50mL7oPyr/h9bL3E4aZccVwpwP+5W9Vjkg== + dependencies: + are-we-there-yet "~1.1.2" + console-control-strings "~1.1.0" + gauge "~2.7.3" + set-blocking "~2.0.0" + +nth-check@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/nth-check/-/nth-check-2.0.0.tgz#1bb4f6dac70072fc313e8c9cd1417b5074c0a125" + integrity sha512-i4sc/Kj8htBrAiH1viZ0TgU8Y5XqCaV/FziYK6TBczxmeKm3AEFWqqF3195yKudrarqy7Zu80Ra5dobFjn9X/Q== + dependencies: + boolbase "^1.0.0" + +nth-check@~1.0.1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/nth-check/-/nth-check-1.0.2.tgz#b2bd295c37e3dd58a3bf0700376663ba4d9cf05c" + integrity sha512-WeBOdju8SnzPN5vTUJYxYUxLeXpCaVP5i5e0LF8fg7WORF2Wd7wFX/pk0tYZk7s8T+J7VLy0Da6J1+wCT0AtHg== + dependencies: + boolbase "~1.0.0" + +nullthrows@^1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/nullthrows/-/nullthrows-1.1.1.tgz#7818258843856ae971eae4208ad7d7eb19a431b1" + integrity sha512-2vPPEi+Z7WqML2jZYddDIfy5Dqb0r2fze2zTxNNknZaFpVHU3mFB3R+DWeJWGVx0ecvttSGlJTI+WG+8Z4cDWw== + +number-is-nan@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/number-is-nan/-/number-is-nan-1.0.1.tgz#097b602b53422a522c1afb8790318336941a011d" + integrity sha1-CXtgK1NCKlIsGvuHkDGDNpQaAR0= + +number-to-bn@1.7.0: + version "1.7.0" + resolved "https://registry.yarnpkg.com/number-to-bn/-/number-to-bn-1.7.0.tgz#bb3623592f7e5f9e0030b1977bd41a0c53fe1ea0" + integrity sha512-wsJ9gfSz1/s4ZsJN01lyonwuxA1tml6X1yBDnfpMglypcBRFZZkus26EdPSlqS5GJfYddVZa22p3VNb3z5m5Ig== + dependencies: + bn.js "4.11.6" + strip-hex-prefix "1.0.0" + +"nwmatcher@>= 1.3.7 < 2.0.0": + version "1.4.4" + resolved "https://registry.yarnpkg.com/nwmatcher/-/nwmatcher-1.4.4.tgz#2285631f34a95f0d0395cd900c96ed39b58f346e" + integrity sha512-3iuY4N5dhgMpCUrOVnuAdGrgxVqV2cJpM+XNccjR2DKOB1RUP0aA+wGXEiNziG/UKboFyGBIoKOaNlJxx8bciQ== + +oauth-sign@~0.9.0: + version "0.9.0" + resolved "https://registry.yarnpkg.com/oauth-sign/-/oauth-sign-0.9.0.tgz#47a7b016baa68b5fa0ecf3dee08a85c679ac6455" + integrity sha512-fexhUFFPTGV8ybAtSIGbV6gOkSv8UtRbDBnAyLQw4QPKkgNlsH2ByPGtMUqdWkos6YCRmAqViwgZrJc/mRDzZQ== + +object-assign@4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/object-assign/-/object-assign-4.1.0.tgz#7a3b3d0e98063d43f4c03f2e8ae6cd51a86883a0" + integrity sha1-ejs9DpgGPUP0wD8uiubNUahog6A= + +object-assign@^2.0.0: + version "2.1.1" + resolved "https://registry.yarnpkg.com/object-assign/-/object-assign-2.1.1.tgz#43c36e5d569ff8e4816c4efa8be02d26967c18aa" + integrity sha512-CdsOUYIh5wIiozhJ3rLQgmUTgcyzFwZZrqhkKhODMoGtPKM+wt0h0CNIoauJWMsS9822EdzPsF/6mb4nLvPN5g== + +object-assign@^4, object-assign@^4.0.0, object-assign@^4.0.1, object-assign@^4.1.0, object-assign@^4.1.1: + version "4.1.1" + 
resolved "https://registry.yarnpkg.com/object-assign/-/object-assign-4.1.1.tgz#2109adc7965887cfc05cbbd442cac8bfbb360863" + integrity sha1-IQmtx5ZYh8/AXLvUQsrIv7s2CGM= + +object-inspect@^1.11.0: + version "1.11.0" + resolved "https://registry.yarnpkg.com/object-inspect/-/object-inspect-1.11.0.tgz#9dceb146cedd4148a0d9e51ab88d34cf509922b1" + integrity sha512-jp7ikS6Sd3GxQfZJPyH3cjcbJF6GZPClgdV+EFygjFLQ5FmW/dRUnTd9PQ9k0JhoNDabWFbpF1yCdSWCC6gexg== + +object-inspect@^1.9.0: + version "1.12.2" + resolved "https://registry.yarnpkg.com/object-inspect/-/object-inspect-1.12.2.tgz#c0641f26394532f28ab8d796ab954e43c009a8ea" + integrity sha512-z+cPxW0QGUp0mcqcsgQyLVRDoXFQbXOwBaqyF7VIgI4TWNQsDHrBpUQslRmIfAoYWdYzs6UlKJtB2XJpTaNSpQ== + +object-keys@^1.0.11, object-keys@^1.0.12, object-keys@^1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/object-keys/-/object-keys-1.1.1.tgz#1c47f272df277f3b1daf061677d9c82e2322c60e" + integrity sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA== + +object-keys@~0.4.0: + version "0.4.0" + resolved "https://registry.yarnpkg.com/object-keys/-/object-keys-0.4.0.tgz#28a6aae7428dd2c3a92f3d95f21335dd204e0336" + integrity sha1-KKaq50KN0sOpLz2V8hM13SBOAzY= + +object-path@^0.11.4: + version "0.11.8" + resolved "https://registry.yarnpkg.com/object-path/-/object-path-0.11.8.tgz#ed002c02bbdd0070b78a27455e8ae01fc14d4742" + integrity sha512-YJjNZrlXJFM42wTBn6zgOJVar9KFJvzx6sTWDte8sWZF//cnjl0BxHNpfZx+ZffXX63A9q0b1zsFiBX4g4X5KA== + +object.assign@4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/object.assign/-/object.assign-4.1.0.tgz#968bf1100d7956bb3ca086f006f846b3bc4008da" + integrity sha512-exHJeq6kBKj58mqGyTQ9DFvrZC/eR6OwxzoM9YRoGBqrXYonaFyGiFMuc9VZrXf7DarreEwMpurG3dd+CNyW5w== + dependencies: + define-properties "^1.1.2" + function-bind "^1.1.1" + has-symbols "^1.0.0" + object-keys "^1.0.11" + +object.assign@^4.1.0, object.assign@^4.1.2: + version "4.1.2" + resolved "https://registry.yarnpkg.com/object.assign/-/object.assign-4.1.2.tgz#0ed54a342eceb37b38ff76eb831a0e788cb63940" + integrity sha512-ixT2L5THXsApyiUPYKmW+2EHpXXe5Ii3M+f4e+aJFAHao5amFRW6J0OO6c/LU8Be47utCx2GL89hxGB6XSmKuQ== + dependencies: + call-bind "^1.0.0" + define-properties "^1.1.3" + has-symbols "^1.0.1" + object-keys "^1.1.1" + +object.getownpropertydescriptors@^2.1.1: + version "2.1.3" + resolved "https://registry.yarnpkg.com/object.getownpropertydescriptors/-/object.getownpropertydescriptors-2.1.3.tgz#b223cf38e17fefb97a63c10c91df72ccb386df9e" + integrity sha512-VdDoCwvJI4QdC6ndjpqFmoL3/+HxffFBbcJzKi5hwLLqqx3mdbedRpfZDdK0SrOSauj8X4GzBvnDZl4vTN7dOw== + dependencies: + call-bind "^1.0.2" + define-properties "^1.1.3" + es-abstract "^1.19.1" + +object.omit@^2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/object.omit/-/object.omit-2.0.1.tgz#1a9c744829f39dbb858c76ca3579ae2a54ebd1fa" + integrity sha1-Gpx0SCnznbuFjHbKNXmuKlTr0fo= + dependencies: + for-own "^0.1.4" + is-extendable "^0.1.1" + +oboe@2.1.4: + version "2.1.4" + resolved "https://registry.yarnpkg.com/oboe/-/oboe-2.1.4.tgz#20c88cdb0c15371bb04119257d4fdd34b0aa49f6" + integrity sha1-IMiM2wwVNxuwQRklfU/dNLCqSfY= + dependencies: + http-https "^1.0.0" + +oboe@2.1.5: + version "2.1.5" + resolved "https://registry.yarnpkg.com/oboe/-/oboe-2.1.5.tgz#5554284c543a2266d7a38f17e073821fbde393cd" + integrity sha1-VVQoTFQ6ImbXo48X4HOCH73jk80= + dependencies: + http-https "^1.0.0" + +on-finished@2.4.1: + version "2.4.1" + resolved 
"https://registry.yarnpkg.com/on-finished/-/on-finished-2.4.1.tgz#58c8c44116e54845ad57f14ab10b03533184ac3f" + integrity sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg== + dependencies: + ee-first "1.1.1" + +once@^1.3.0, once@^1.3.1, once@^1.4.0: + version "1.4.0" + resolved "https://registry.yarnpkg.com/once/-/once-1.4.0.tgz#583b1aa775961d4b113ac17d9c50baef9dd76bd1" + integrity sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w== + dependencies: + wrappy "1" + +onetime@^2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/onetime/-/onetime-2.0.1.tgz#067428230fd67443b2794b22bba528b6867962d4" + integrity sha1-BnQoIw/WdEOyeUsiu6UotoZ5YtQ= + dependencies: + mimic-fn "^1.0.0" + +onetime@^5.1.0: + version "5.1.2" + resolved "https://registry.yarnpkg.com/onetime/-/onetime-5.1.2.tgz#d0e96ebb56b07476df1dd9c4806e5237985ca45e" + integrity sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg== + dependencies: + mimic-fn "^2.1.0" + +opencollective-postinstall@^2.0.0: + version "2.0.3" + resolved "https://registry.yarnpkg.com/opencollective-postinstall/-/opencollective-postinstall-2.0.3.tgz#7a0fff978f6dbfa4d006238fbac98ed4198c3259" + integrity sha512-8AV/sCtuzUeTo8gQK5qDZzARrulB3egtLzFgteqB2tcT4Mw7B8Kt7JcDHmltjz6FOAHsvTevk70gZEbhM4ZS9Q== + +optimism@^0.14.0: + version "0.14.0" + resolved "https://registry.yarnpkg.com/optimism/-/optimism-0.14.0.tgz#256fb079a3428585b40a3a8462f907e0abd2fc49" + integrity sha512-ygbNt8n4DOCVpkwiLF+IrKKeNHOjtr9aXLWGP9HNJGoblSGsnVbJLstcH6/nE9Xy5ZQtlkSioFQNnthmENW6FQ== + dependencies: + "@wry/context" "^0.5.2" + "@wry/trie" "^0.2.1" + +optimist@~0.3.5: + version "0.3.7" + resolved "https://registry.yarnpkg.com/optimist/-/optimist-0.3.7.tgz#c90941ad59e4273328923074d2cf2e7cbc6ec0d9" + integrity sha512-TCx0dXQzVtSCg2OgY/bO9hjM9cV4XYx09TVK+s3+FhkjT6LovsLe+pPMzpWf+6yXK/hUizs2gUoTw3jHM0VaTQ== + dependencies: + wordwrap "~0.0.2" + +optionator@^0.8.1: + version "0.8.3" + resolved "https://registry.yarnpkg.com/optionator/-/optionator-0.8.3.tgz#84fa1d036fe9d3c7e21d99884b601167ec8fb495" + integrity sha512-+IW9pACdk3XWmmTXG8m3upGUJst5XRGzxMRjXzAuJ1XnIFNvfhjjIuYkDvysnPQ7qzqVzLt78BCruntqRhWQbA== + dependencies: + deep-is "~0.1.3" + fast-levenshtein "~2.0.6" + levn "~0.3.0" + prelude-ls "~1.1.2" + type-check "~0.3.2" + word-wrap "~1.2.3" + +ora@^3.4.0: + version "3.4.0" + resolved "https://registry.yarnpkg.com/ora/-/ora-3.4.0.tgz#bf0752491059a3ef3ed4c85097531de9fdbcd318" + integrity sha512-eNwHudNbO1folBP3JsZ19v9azXWtQZjICdr3Q0TDPIaeBQ3mXLrh54wM+er0+hSp+dWKf+Z8KM58CYzEyIYxYg== + dependencies: + chalk "^2.4.2" + cli-cursor "^2.1.0" + cli-spinners "^2.0.0" + log-symbols "^2.2.0" + strip-ansi "^5.2.0" + wcwidth "^1.0.1" + +ora@^4.0.0: + version "4.1.1" + resolved "https://registry.yarnpkg.com/ora/-/ora-4.1.1.tgz#566cc0348a15c36f5f0e979612842e02ba9dddbc" + integrity sha512-sjYP8QyVWBpBZWD6Vr1M/KwknSw6kJOz41tvGMlwWeClHBtYKTbHMki1PsLZnxKpXMPbTKv9b3pjQu3REib96A== + dependencies: + chalk "^3.0.0" + cli-cursor "^3.1.0" + cli-spinners "^2.2.0" + is-interactive "^1.0.0" + log-symbols "^3.0.0" + mute-stream "0.0.8" + strip-ansi "^6.0.0" + wcwidth "^1.0.1" + +ordered-read-streams@^0.3.0: + version "0.3.0" + resolved "https://registry.yarnpkg.com/ordered-read-streams/-/ordered-read-streams-0.3.0.tgz#7137e69b3298bb342247a1bbee3881c80e2fd78b" + integrity sha1-cTfmmzKYuzQiR6G77jiByA4v14s= + dependencies: + is-stream "^1.0.1" + readable-stream "^2.0.1" + 
+original-require@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/original-require/-/original-require-1.0.1.tgz#0f130471584cd33511c5ec38c8d59213f9ac5e20" + integrity sha1-DxMEcVhM0zURxew4yNWSE/msXiA= + +original@^1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/original/-/original-1.0.2.tgz#e442a61cffe1c5fd20a65f3261c26663b303f25f" + integrity sha512-hyBVl6iqqUOJ8FqRe+l/gS8H+kKYjrEndd5Pm1MfBtsEKA038HkkdbAl/72EAXGyonD/PFsvmVG+EvcIpliMBg== + dependencies: + url-parse "^1.4.3" + +os-homedir@^1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/os-homedir/-/os-homedir-1.0.2.tgz#ffbc4988336e0e833de0c168c7ef152121aa7fb3" + integrity sha1-/7xJiDNuDoM94MFox+8VISGqf7M= + +os-locale@^1.4.0: + version "1.4.0" + resolved "https://registry.yarnpkg.com/os-locale/-/os-locale-1.4.0.tgz#20f9f17ae29ed345e8bde583b13d2009803c14d9" + integrity sha1-IPnxeuKe00XoveWDsT0gCYA8FNk= + dependencies: + lcid "^1.0.0" + +os-tmpdir@^1.0.0, os-tmpdir@^1.0.1, os-tmpdir@~1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/os-tmpdir/-/os-tmpdir-1.0.2.tgz#bbe67406c79aa85c5cfec766fe5734555dfa1274" + integrity sha1-u+Z0BseaqFxc/sdm/lc0VV36EnQ= + +osenv@^0.1.4: + version "0.1.5" + resolved "https://registry.yarnpkg.com/osenv/-/osenv-0.1.5.tgz#85cdfafaeb28e8677f416e287592b5f3f49ea410" + integrity sha512-0CWcCECdMVc2Rw3U5w9ZjqX6ga6ubk1xDVKxtBQPK7wis/0F2r9T6k4ydGYhecl7YUBxBVxhL5oisPsNxAPe2g== + dependencies: + os-homedir "^1.0.0" + os-tmpdir "^1.0.0" + +p-cancelable@^0.3.0: + version "0.3.0" + resolved "https://registry.yarnpkg.com/p-cancelable/-/p-cancelable-0.3.0.tgz#b9e123800bcebb7ac13a479be195b507b98d30fa" + integrity sha512-RVbZPLso8+jFeq1MfNvgXtCRED2raz/dKpacfTNxsx6pLEpEomM7gah6VeHSYV3+vo0OAi4MkArtQcWWXuQoyw== + +p-cancelable@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/p-cancelable/-/p-cancelable-1.1.0.tgz#d078d15a3af409220c886f1d9a0ca2e441ab26cc" + integrity sha512-s73XxOZ4zpt1edZYZzvhqFa6uvQc1vwUa0K0BdtIZgQMAJj9IbebH+JkgKZc9h+B05PKHLOTl4ajG1BmNrVZlw== + +p-finally@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/p-finally/-/p-finally-1.0.0.tgz#3fbcfb15b899a44123b34b6dcc18b724336a2cae" + integrity sha1-P7z7FbiZpEEjs0ttzBi3JDNqLK4= + +p-finally@^2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/p-finally/-/p-finally-2.0.1.tgz#bd6fcaa9c559a096b680806f4d657b3f0f240561" + integrity sha512-vpm09aKwq6H9phqRQzecoDpD8TmVyGw70qmWlyq5onxY7tqyTTFVvxMykxQSQKILBSFlbXpypIw2T1Ml7+DDtw== + +p-limit@3.1.0, p-limit@^3.0.2: + version "3.1.0" + resolved "https://registry.yarnpkg.com/p-limit/-/p-limit-3.1.0.tgz#e1daccbe78d0d1388ca18c64fea38e3e57e3706b" + integrity sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ== + dependencies: + yocto-queue "^0.1.0" + +p-limit@^1.1.0: + version "1.3.0" + resolved "https://registry.yarnpkg.com/p-limit/-/p-limit-1.3.0.tgz#b86bd5f0c25690911c7590fcbfc2010d54b3ccb8" + integrity sha512-vvcXsLAJ9Dr5rQOPk7toZQZJApBl2K4J6dANSsEuh6QI41JYcsS/qhTGa9ErIUUgK3WNQoJYvylxvjqmiqEA9Q== + dependencies: + p-try "^1.0.0" + +p-limit@^2.0.0, p-limit@^2.2.0: + version "2.3.0" + resolved "https://registry.yarnpkg.com/p-limit/-/p-limit-2.3.0.tgz#3dd33c647a214fdfffd835933eb086da0dc21db1" + integrity sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w== + dependencies: + p-try "^2.0.0" + +p-locate@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/p-locate/-/p-locate-2.0.0.tgz#20a0103b222a70c8fd39cc2e580680f3dde5ec43" 
+ integrity sha1-IKAQOyIqcMj9OcwuWAaA893l7EM= + dependencies: + p-limit "^1.1.0" + +p-locate@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/p-locate/-/p-locate-3.0.0.tgz#322d69a05c0264b25997d9f40cd8a891ab0064a4" + integrity sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ== + dependencies: + p-limit "^2.0.0" + +p-locate@^4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/p-locate/-/p-locate-4.1.0.tgz#a3428bb7088b3a60292f66919278b7c297ad4f07" + integrity sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A== + dependencies: + p-limit "^2.2.0" + +p-locate@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/p-locate/-/p-locate-5.0.0.tgz#83c8315c6785005e3bd021839411c9e110e6d834" + integrity sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw== + dependencies: + p-limit "^3.0.2" + +p-timeout@^1.1.1: + version "1.2.1" + resolved "https://registry.yarnpkg.com/p-timeout/-/p-timeout-1.2.1.tgz#5eb3b353b7fce99f101a1038880bb054ebbea386" + integrity sha1-XrOzU7f86Z8QGhA4iAuwVOu+o4Y= + dependencies: + p-finally "^1.0.0" + +p-try@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/p-try/-/p-try-1.0.0.tgz#cbc79cdbaf8fd4228e13f621f2b1a237c1b207b3" + integrity sha1-y8ec26+P1CKOE/Yh8rGiN8GyB7M= + +p-try@^2.0.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/p-try/-/p-try-2.2.0.tgz#cb2868540e313d61de58fafbe35ce9004d5540e6" + integrity sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ== + +pako@^1.0.4: + version "1.0.11" + resolved "https://registry.yarnpkg.com/pako/-/pako-1.0.11.tgz#6c9599d340d54dfd3946380252a35705a6b992bf" + integrity sha512-4hLB8Py4zZce5s4yd9XzopqwVv/yGNhV1Bl8NTmCq1763HeK2+EwVTv+leGeL13Dnh2wfbqowVPXCIO0z4taYw== + +param-case@^2.1.0: + version "2.1.1" + resolved "https://registry.yarnpkg.com/param-case/-/param-case-2.1.1.tgz#df94fd8cf6531ecf75e6bef9a0858fbc72be2247" + integrity sha1-35T9jPZTHs915r75oIWPvHK+Ikc= + dependencies: + no-case "^2.2.0" + +parent-module@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/parent-module/-/parent-module-1.0.1.tgz#691d2709e78c79fae3a156622452d00762caaaa2" + integrity sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g== + dependencies: + callsites "^3.0.0" + +parse-asn1@^5.0.0, parse-asn1@^5.1.5: + version "5.1.6" + resolved "https://registry.yarnpkg.com/parse-asn1/-/parse-asn1-5.1.6.tgz#385080a3ec13cb62a62d39409cb3e88844cdaed4" + integrity sha512-RnZRo1EPU6JBnra2vGHj0yhp6ebyjBZpmUCLHWiFhxlzvBCCpAuZ7elsBp1PVAbQN0/04VD/19rfzlBSwLstMw== + dependencies: + asn1.js "^5.2.0" + browserify-aes "^1.0.0" + evp_bytestokey "^1.0.0" + pbkdf2 "^3.0.3" + safe-buffer "^5.1.1" + +parse-cache-control@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/parse-cache-control/-/parse-cache-control-1.0.1.tgz#8eeab3e54fa56920fe16ba38f77fa21aacc2d74e" + integrity sha512-60zvsJReQPX5/QP0Kzfd/VrpjScIQ7SHBW6bFCYfEP+fp0Eppr1SHhIO5nd1PjZtvclzSzES9D/p5nFJurwfWg== + +parse-glob@^3.0.4: + version "3.0.4" + resolved "https://registry.yarnpkg.com/parse-glob/-/parse-glob-3.0.4.tgz#b2c376cfb11f35513badd173ef0bb6e3a388391c" + integrity sha1-ssN2z7EfNVE7rdFz7wu246OIORw= + dependencies: + glob-base "^0.3.0" + is-dotfile "^1.0.0" + is-extglob "^1.0.0" + is-glob "^2.0.0" + +parse-headers@^2.0.0: + version "2.0.3" + resolved 
"https://registry.yarnpkg.com/parse-headers/-/parse-headers-2.0.3.tgz#5e8e7512383d140ba02f0c7aa9f49b4399c92515" + integrity sha512-QhhZ+DCCit2Coi2vmAKbq5RGTRcQUOE2+REgv8vdyu7MnYx2eZztegqtTx99TZ86GTIwqiy3+4nQTWZ2tgmdCA== + +parse-json@^2.2.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/parse-json/-/parse-json-2.2.0.tgz#f480f40434ef80741f8469099f8dea18f55a4dc9" + integrity sha1-9ID0BDTvgHQfhGkJn43qGPVaTck= + dependencies: + error-ex "^1.2.0" + +parse-json@^5.0.0: + version "5.2.0" + resolved "https://registry.yarnpkg.com/parse-json/-/parse-json-5.2.0.tgz#c76fc66dee54231c962b22bcc8a72cf2f99753cd" + integrity sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg== + dependencies: + "@babel/code-frame" "^7.0.0" + error-ex "^1.3.1" + json-parse-even-better-errors "^2.3.0" + lines-and-columns "^1.1.6" + +parse5-htmlparser2-tree-adapter@^6.0.0: + version "6.0.1" + resolved "https://registry.yarnpkg.com/parse5-htmlparser2-tree-adapter/-/parse5-htmlparser2-tree-adapter-6.0.1.tgz#2cdf9ad823321140370d4dbf5d3e92c7c8ddc6e6" + integrity sha512-qPuWvbLgvDGilKc5BoicRovlT4MtYT6JfJyBOMDsKoiT+GiuP5qyrPCnR9HcPECIJJmZh5jRndyNThnhhb/vlA== + dependencies: + parse5 "^6.0.1" + +parse5@^1.5.1: + version "1.5.1" + resolved "https://registry.yarnpkg.com/parse5/-/parse5-1.5.1.tgz#9b7f3b0de32be78dc2401b17573ccaf0f6f59d94" + integrity sha1-m387DeMr543CQBsXVzzK8Pb1nZQ= + +parse5@^3.0.1: + version "3.0.3" + resolved "https://registry.yarnpkg.com/parse5/-/parse5-3.0.3.tgz#042f792ffdd36851551cf4e9e066b3874ab45b5c" + integrity sha512-rgO9Zg5LLLkfJF9E6CCmXlSE4UVceloys8JrFqCcHloC3usd/kJCyPDwH2SOlzix2j3xaP9sUX3e8+kvkuleAA== + dependencies: + "@types/node" "*" + +parse5@^6.0.0, parse5@^6.0.1: + version "6.0.1" + resolved "https://registry.yarnpkg.com/parse5/-/parse5-6.0.1.tgz#e1a1c085c569b3dc08321184f19a39cc27f7c30b" + integrity sha512-Ofn/CTFzRGTTxwpNEs9PP93gXShHcTq255nzRYSKe8AkVpZY7e1fpmTfOyoIvjP5HG7Z2ZM7VS9PPhQGW2pOpw== + +parseurl@^1.3.2, parseurl@~1.3.3: + version "1.3.3" + resolved "https://registry.yarnpkg.com/parseurl/-/parseurl-1.3.3.tgz#9da19e7bee8d12dff0513ed5b76957793bc2e8d4" + integrity sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ== + +pascal-case@^2.0.0, pascal-case@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/pascal-case/-/pascal-case-2.0.1.tgz#2d578d3455f660da65eca18ef95b4e0de912761e" + integrity sha1-LVeNNFX2YNpl7KGO+VtODekSdh4= + dependencies: + camel-case "^3.0.0" + upper-case-first "^1.1.0" + +pascal-case@^3.1.1, pascal-case@^3.1.2: + version "3.1.2" + resolved "https://registry.yarnpkg.com/pascal-case/-/pascal-case-3.1.2.tgz#b48e0ef2b98e205e7c1dae747d0b1508237660eb" + integrity sha512-uWlGT3YSnK9x3BQJaOdcZwrnV6hPpd8jFH1/ucpiLRPh/2zCVJKS19E4GvYHvaCcACn3foXZ0cLB9Wrx1KGe5g== + dependencies: + no-case "^3.0.4" + tslib "^2.0.3" + +path-case@^2.1.0: + version "2.1.1" + resolved "https://registry.yarnpkg.com/path-case/-/path-case-2.1.1.tgz#94b8037c372d3fe2906e465bb45e25d226e8eea5" + integrity sha1-lLgDfDctP+KQbkZbtF4l0ibo7qU= + dependencies: + no-case "^2.2.0" + +path-dirname@^1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/path-dirname/-/path-dirname-1.0.2.tgz#cc33d24d525e099a5388c0336c6e32b9160609e0" + integrity sha1-zDPSTVJeCZpTiMAzbG4yuRYGCeA= + +path-exists@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/path-exists/-/path-exists-2.1.0.tgz#0feb6c64f0fc518d9a754dd5efb62c7022761f4b" + integrity sha1-D+tsZPD8UY2adU3V77YscCJ2H0s= + dependencies: + 
pinkie-promise "^2.0.0" + +path-exists@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/path-exists/-/path-exists-3.0.0.tgz#ce0ebeaa5f78cb18925ea7d810d7b59b010fd515" + integrity sha1-zg6+ql94yxiSXqfYENe1mwEP1RU= + +path-exists@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/path-exists/-/path-exists-4.0.0.tgz#513bdbe2d3b95d7762e8c1137efa195c6c61b5b3" + integrity sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w== + +path-is-absolute@^1.0.0, path-is-absolute@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/path-is-absolute/-/path-is-absolute-1.0.1.tgz#174b9268735534ffbc7ace6bf53a5a9e1b5c5f5f" + integrity sha1-F0uSaHNVNP+8es5r9TpanhtcX18= + +path-key@^3.0.0, path-key@^3.1.0: + version "3.1.1" + resolved "https://registry.yarnpkg.com/path-key/-/path-key-3.1.1.tgz#581f6ade658cbba65a0d3380de7753295054f375" + integrity sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q== + +path-parse@^1.0.6: + version "1.0.7" + resolved "https://registry.yarnpkg.com/path-parse/-/path-parse-1.0.7.tgz#fbc114b60ca42b30d9daf5858e4bd68bbedb6735" + integrity sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw== + +path-to-regexp@0.1.7: + version "0.1.7" + resolved "https://registry.yarnpkg.com/path-to-regexp/-/path-to-regexp-0.1.7.tgz#df604178005f522f15eb4490e7247a1bfaa67f8c" + integrity sha512-5DFkuoqlv1uYQKxy8omFBeJPQcdoE07Kv2sferDCrAq1ohOU+MSDswDIbnx3YAM60qIOnYa53wBhXW0EbMonrQ== + +path-type@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/path-type/-/path-type-1.1.0.tgz#59c44f7ee491da704da415da5a4070ba4f8fe441" + integrity sha1-WcRPfuSR2nBNpBXaWkBwuk+P5EE= + dependencies: + graceful-fs "^4.1.2" + pify "^2.0.0" + pinkie-promise "^2.0.0" + +path-type@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/path-type/-/path-type-4.0.0.tgz#84ed01c0a7ba380afe09d90a8c180dcd9d03043b" + integrity sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw== + +pbkdf2@^3.0.17: + version "3.1.2" + resolved "https://registry.yarnpkg.com/pbkdf2/-/pbkdf2-3.1.2.tgz#dd822aa0887580e52f1a039dc3eda108efae3075" + integrity sha512-iuh7L6jA7JEGu2WxDwtQP1ddOpaJNC4KlDEFfdQajSGgGPNi4OyDc2R7QnbY2bR9QjBVGwgvTdNJZoE7RaxUMA== + dependencies: + create-hash "^1.1.2" + create-hmac "^1.1.4" + ripemd160 "^2.0.1" + safe-buffer "^5.0.1" + sha.js "^2.4.8" + +pbkdf2@^3.0.3: + version "3.1.1" + resolved "https://registry.yarnpkg.com/pbkdf2/-/pbkdf2-3.1.1.tgz#cb8724b0fada984596856d1a6ebafd3584654b94" + integrity sha512-4Ejy1OPxi9f2tt1rRV7Go7zmfDQ+ZectEQz3VGUQhgq62HtIRPDyG/JtnwIxs6x3uNMwo2V7q1fMvKjb+Tnpqg== + dependencies: + create-hash "^1.1.2" + create-hmac "^1.1.4" + ripemd160 "^2.0.1" + safe-buffer "^5.0.1" + sha.js "^2.4.8" + +peer-id@~0.12.2, peer-id@~0.12.3: + version "0.12.5" + resolved "https://registry.yarnpkg.com/peer-id/-/peer-id-0.12.5.tgz#b22a1edc5b4aaaa2bb830b265ba69429823e5179" + integrity sha512-3xVWrtIvNm9/OPzaQBgXDrfWNx63AftgFQkvqO6YSZy7sP3Fuadwwbn54F/VO9AnpyW/26i0WRQz9FScivXrmw== + dependencies: + async "^2.6.3" + class-is "^1.1.0" + libp2p-crypto "~0.16.1" + multihashes "~0.4.15" + +peer-info@~0.15.1: + version "0.15.1" + resolved "https://registry.yarnpkg.com/peer-info/-/peer-info-0.15.1.tgz#21254a7c516d0dd046b150120b9aaf1b9ad02146" + integrity sha512-Y91Q2tZRC0CpSTPd1UebhGqniOrOAk/aj60uYUcWJXCoLTAnGu+4LJGoiay8ayudS6ice7l3SKhgL/cS62QacA== + dependencies: + mafmt "^6.0.2" + multiaddr 
"^6.0.3" + peer-id "~0.12.2" + unique-by "^1.0.0" + +pem-jwk@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/pem-jwk/-/pem-jwk-2.0.0.tgz#1c5bb264612fc391340907f5c1de60c06d22f085" + integrity sha512-rFxu7rVoHgQ5H9YsP50dDWf0rHjreVA2z0yPiWr5WdH/UHb29hKtF7h6l8vNd1cbYR1t0QL+JKhW55a2ZV4KtA== + dependencies: + asn1.js "^5.0.1" + +performance-now@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/performance-now/-/performance-now-2.1.0.tgz#6309f4e0e5fa913ec1c69307ae364b4b377c9e7b" + integrity sha512-7EAHlyLHI56VEIdK57uwHdHKIaAGbnXPiw0yWbarQZOKaKpvUIgW0jWRVLiatnM+XXlSwsanIBH/hzGMJulMow== + +picomatch@^2.0.4, picomatch@^2.2.1: + version "2.3.1" + resolved "https://registry.yarnpkg.com/picomatch/-/picomatch-2.3.1.tgz#3ba3833733646d9d3e4995946c1365a67fb07a42" + integrity sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA== + +picomatch@^2.0.5: + version "2.2.2" + resolved "https://registry.yarnpkg.com/picomatch/-/picomatch-2.2.2.tgz#21f333e9b6b8eaff02468f5146ea406d345f4dad" + integrity sha512-q0M/9eZHzmr0AulXyPwNfZjtwZ/RBZlbN3K3CErVrk50T2ASYI7Bye0EvekFY3IP1Nt2DHu0re+V2ZHIpMkuWg== + +pify@^2.0.0: + version "2.3.0" + resolved "https://registry.yarnpkg.com/pify/-/pify-2.3.0.tgz#ed141a6ac043a849ea588498e7dca8b15330e90c" + integrity sha1-7RQaasBDqEnqWISY59yosVMw6Qw= + +pify@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/pify/-/pify-3.0.0.tgz#e5a4acd2c101fdf3d9a4d07f0dbc4db49dd28176" + integrity sha1-5aSs0sEB/fPZpNB/DbxNtJ3SgXY= + +pinkie-promise@^2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/pinkie-promise/-/pinkie-promise-2.0.1.tgz#2135d6dfa7a358c069ac9b178776288228450ffa" + integrity sha1-ITXW36ejWMBprJsXh3YogihFD/o= + dependencies: + pinkie "^2.0.0" + +pinkie@^2.0.0: + version "2.0.4" + resolved "https://registry.yarnpkg.com/pinkie/-/pinkie-2.0.4.tgz#72556b80cfa0d48a974e80e77248e80ed4f7f870" + integrity sha1-clVrgM+g1IqXToDnckjoDtT3+HA= + +pkg-conf@^1.1.2: + version "1.1.3" + resolved "https://registry.yarnpkg.com/pkg-conf/-/pkg-conf-1.1.3.tgz#378e56d6fd13e88bfb6f4a25df7a83faabddba5b" + integrity sha1-N45W1v0T6Iv7b0ol33qD+qvduls= + dependencies: + find-up "^1.0.0" + load-json-file "^1.1.0" + object-assign "^4.0.1" + symbol "^0.2.1" + +pkginfo@0.4.1: + version "0.4.1" + resolved "https://registry.yarnpkg.com/pkginfo/-/pkginfo-0.4.1.tgz#b5418ef0439de5425fc4995042dced14fb2a84ff" + integrity sha1-tUGO8EOd5UJfxJlQQtztFPsqhP8= + +pluralize@^8.0.0: + version "8.0.0" + resolved "https://registry.yarnpkg.com/pluralize/-/pluralize-8.0.0.tgz#1a6fa16a38d12a1901e0320fa017051c539ce3b1" + integrity sha512-Nc3IT5yHzflTfbjgqWcCPpo7DaKy4FnpB0l/zCAW0Tc7jxAiuqSxHasntB3D7887LSrA93kDJ9IXovxJYxyLCA== + +pouchdb-abstract-mapreduce@7.2.2: + version "7.2.2" + resolved "https://registry.yarnpkg.com/pouchdb-abstract-mapreduce/-/pouchdb-abstract-mapreduce-7.2.2.tgz#dd1b10a83f8d24361dce9aaaab054614b39f766f" + integrity sha512-7HWN/2yV2JkwMnGnlp84lGvFtnm0Q55NiBUdbBcaT810+clCGKvhssBCrXnmwShD1SXTwT83aszsgiSfW+SnBA== + dependencies: + pouchdb-binary-utils "7.2.2" + pouchdb-collate "7.2.2" + pouchdb-collections "7.2.2" + pouchdb-errors "7.2.2" + pouchdb-fetch "7.2.2" + pouchdb-mapreduce-utils "7.2.2" + pouchdb-md5 "7.2.2" + pouchdb-utils "7.2.2" + +pouchdb-adapter-leveldb-core@7.2.2: + version "7.2.2" + resolved "https://registry.yarnpkg.com/pouchdb-adapter-leveldb-core/-/pouchdb-adapter-leveldb-core-7.2.2.tgz#e0aa6a476e2607d7ae89f4a803c9fba6e6d05a8a" + integrity 
sha512-K9UGf1Ivwe87mjrMqN+1D07tO/DfU7ariVDrGffuOjvl+3BcvUF25IWrxsBObd4iPOYCH7NVQWRpojhBgxULtQ== + dependencies: + argsarray "0.0.1" + buffer-from "1.1.1" + double-ended-queue "2.1.0-0" + levelup "4.4.0" + pouchdb-adapter-utils "7.2.2" + pouchdb-binary-utils "7.2.2" + pouchdb-collections "7.2.2" + pouchdb-errors "7.2.2" + pouchdb-json "7.2.2" + pouchdb-md5 "7.2.2" + pouchdb-merge "7.2.2" + pouchdb-utils "7.2.2" + sublevel-pouchdb "7.2.2" + through2 "3.0.2" + +pouchdb-adapter-memory@^7.1.1: + version "7.2.2" + resolved "https://registry.yarnpkg.com/pouchdb-adapter-memory/-/pouchdb-adapter-memory-7.2.2.tgz#c0ec2e87928d516ca9d1b5badc7269df6f95e5ea" + integrity sha512-9o+zdItPEq7rIrxdkUxgsLNaZkDJAGEqqoYgeYdrHidOCZnlhxhX3g7/R/HcpDKC513iEPqJWDJQSfeT6nVKkw== + dependencies: + memdown "1.4.1" + pouchdb-adapter-leveldb-core "7.2.2" + pouchdb-utils "7.2.2" + +pouchdb-adapter-node-websql@^7.0.0: + version "7.0.0" + resolved "https://registry.yarnpkg.com/pouchdb-adapter-node-websql/-/pouchdb-adapter-node-websql-7.0.0.tgz#64ad88dd45b23578e454bf3032a3a79f9d1e4008" + integrity sha512-fNaOMO8bvMrRTSfmH4RSLSpgnKahRcCA7Z0jg732PwRbGvvMdGbreZwvKPPD1fg2tm2ZwwiXWK2G3+oXyoqZYw== + dependencies: + pouchdb-adapter-websql-core "7.0.0" + pouchdb-utils "7.0.0" + websql "1.0.0" + +pouchdb-adapter-utils@7.0.0: + version "7.0.0" + resolved "https://registry.yarnpkg.com/pouchdb-adapter-utils/-/pouchdb-adapter-utils-7.0.0.tgz#1ac8d34481911e0e9a9bf51024610a2e7351dc80" + integrity sha512-UWKPC6jkz6mHUzZefrU7P5X8ZGvBC8LSNZ7BIp0hWvJE6c20cnpDwedTVDpZORcCbVJpDmFOHBYnOqEIblPtbA== + dependencies: + pouchdb-binary-utils "7.0.0" + pouchdb-collections "7.0.0" + pouchdb-errors "7.0.0" + pouchdb-md5 "7.0.0" + pouchdb-merge "7.0.0" + pouchdb-utils "7.0.0" + +pouchdb-adapter-utils@7.2.2: + version "7.2.2" + resolved "https://registry.yarnpkg.com/pouchdb-adapter-utils/-/pouchdb-adapter-utils-7.2.2.tgz#c64426447d9044ba31517a18500d6d2d28abd47d" + integrity sha512-2CzZkTyTyHZkr3ePiWFMTiD5+56lnembMjaTl8ohwegM0+hYhRyJux0biAZafVxgIL4gnCUC4w2xf6WVztzKdg== + dependencies: + pouchdb-binary-utils "7.2.2" + pouchdb-collections "7.2.2" + pouchdb-errors "7.2.2" + pouchdb-md5 "7.2.2" + pouchdb-merge "7.2.2" + pouchdb-utils "7.2.2" + +pouchdb-adapter-websql-core@7.0.0: + version "7.0.0" + resolved "https://registry.yarnpkg.com/pouchdb-adapter-websql-core/-/pouchdb-adapter-websql-core-7.0.0.tgz#27b3e404159538e515b2567baa7869f90caac16c" + integrity sha512-NyMaH0bl20SdJdOCzd+fwXo8JZ15a48/MAwMcIbXzsRHE4DjFNlRcWAcjUP6uN4Ezc+Gx+r2tkBBMf71mIz1Aw== + dependencies: + pouchdb-adapter-utils "7.0.0" + pouchdb-binary-utils "7.0.0" + pouchdb-collections "7.0.0" + pouchdb-errors "7.0.0" + pouchdb-json "7.0.0" + pouchdb-merge "7.0.0" + pouchdb-utils "7.0.0" + +pouchdb-binary-utils@7.0.0: + version "7.0.0" + resolved "https://registry.yarnpkg.com/pouchdb-binary-utils/-/pouchdb-binary-utils-7.0.0.tgz#cb71a288b09572a231f6bab1b4aed201c4d219a7" + integrity sha512-yUktdOPIPvOVouCjJN3uop+bCcpdPwePrLm9eUAZNgEYnUFu0njdx7Q0WRsZ7UJ6l75HinL5ZHk4bnvEt86FLw== + dependencies: + buffer-from "1.1.0" + +pouchdb-binary-utils@7.2.2: + version "7.2.2" + resolved "https://registry.yarnpkg.com/pouchdb-binary-utils/-/pouchdb-binary-utils-7.2.2.tgz#0690b348052c543b1e67f032f47092ca82bcb10e" + integrity sha512-shacxlmyHbUrNfE6FGYpfyAJx7Q0m91lDdEAaPoKZM3SzAmbtB1i+OaDNtYFztXjJl16yeudkDb3xOeokVL3Qw== + dependencies: + buffer-from "1.1.1" + +pouchdb-collate@7.2.2: + version "7.2.2" + resolved 
"https://registry.yarnpkg.com/pouchdb-collate/-/pouchdb-collate-7.2.2.tgz#fc261f5ef837c437e3445fb0abc3f125d982c37c" + integrity sha512-/SMY9GGasslknivWlCVwXMRMnQ8myKHs4WryQ5535nq1Wj/ehpqWloMwxEQGvZE1Sda3LOm7/5HwLTcB8Our+w== + +pouchdb-collections@7.0.0: + version "7.0.0" + resolved "https://registry.yarnpkg.com/pouchdb-collections/-/pouchdb-collections-7.0.0.tgz#fd1f632337dc6301b0ff8649732ca79204e41780" + integrity sha512-DaoUr/vU24Q3gM6ghj0va9j/oBanPwkbhkvnqSyC3Dm5dgf5pculNxueLF9PKMo3ycApoWzHMh6N2N8KJbDU2Q== + +pouchdb-collections@7.2.2: + version "7.2.2" + resolved "https://registry.yarnpkg.com/pouchdb-collections/-/pouchdb-collections-7.2.2.tgz#aeed77f33322429e3f59d59ea233b48ff0e68572" + integrity sha512-6O9zyAYlp3UdtfneiMYuOCWdUCQNo2bgdjvNsMSacQX+3g8WvIoFQCYJjZZCpTttQGb+MHeRMr8m2U95lhJTew== + +pouchdb-debug@^7.1.1: + version "7.2.1" + resolved "https://registry.yarnpkg.com/pouchdb-debug/-/pouchdb-debug-7.2.1.tgz#f5f869f6113c12ccb97cddf5b0a32b6e0e67e961" + integrity sha512-eP3ht/AKavLF2RjTzBM6S9gaI2/apcW6xvaKRQhEdOfiANqerFuksFqHCal3aikVQuDO+cB/cw+a4RyJn/glBw== + dependencies: + debug "3.1.0" + +pouchdb-errors@7.0.0: + version "7.0.0" + resolved "https://registry.yarnpkg.com/pouchdb-errors/-/pouchdb-errors-7.0.0.tgz#4e2a5a8b82af20cbe5f9970ca90b7ec74563caa0" + integrity sha512-dTusY8nnTw4HIztCrNl7AoGgwvS1bVf/3/97hDaGc4ytn72V9/4dK8kTqlimi3UpaurohYRnqac0SGXYP8vgXA== + dependencies: + inherits "2.0.3" + +pouchdb-errors@7.2.2: + version "7.2.2" + resolved "https://registry.yarnpkg.com/pouchdb-errors/-/pouchdb-errors-7.2.2.tgz#80d811d65c766c9d20b755c6e6cc123f8c3c4792" + integrity sha512-6GQsiWc+7uPfgEHeavG+7wuzH3JZW29Dnrvz8eVbDFE50kVFxNDVm3EkYHskvo5isG7/IkOx7PV7RPTA3keG3g== + dependencies: + inherits "2.0.4" + +pouchdb-fetch@7.2.2: + version "7.2.2" + resolved "https://registry.yarnpkg.com/pouchdb-fetch/-/pouchdb-fetch-7.2.2.tgz#492791236d60c899d7e9973f9aca0d7b9cc02230" + integrity sha512-lUHmaG6U3zjdMkh8Vob9GvEiRGwJfXKE02aZfjiVQgew+9SLkuOxNw3y2q4d1B6mBd273y1k2Lm0IAziRNxQnA== + dependencies: + abort-controller "3.0.0" + fetch-cookie "0.10.1" + node-fetch "2.6.0" + +pouchdb-find@^7.0.0: + version "7.2.2" + resolved "https://registry.yarnpkg.com/pouchdb-find/-/pouchdb-find-7.2.2.tgz#1227afdd761812d508fe0794b3e904518a721089" + integrity sha512-BmFeFVQ0kHmDehvJxNZl9OmIztCjPlZlVSdpijuFbk/Fi1EFPU1BAv3kLC+6DhZuOqU/BCoaUBY9sn66pPY2ag== + dependencies: + pouchdb-abstract-mapreduce "7.2.2" + pouchdb-collate "7.2.2" + pouchdb-errors "7.2.2" + pouchdb-fetch "7.2.2" + pouchdb-md5 "7.2.2" + pouchdb-selector-core "7.2.2" + pouchdb-utils "7.2.2" + +pouchdb-json@7.0.0: + version "7.0.0" + resolved "https://registry.yarnpkg.com/pouchdb-json/-/pouchdb-json-7.0.0.tgz#d9860f66f27a359ac6e4b24da4f89b6909f37530" + integrity sha512-w0bNRu/7VmmCrFWMYAm62n30wvJJUT2SokyzeTyj3hRohj4GFwTRg1mSZ+iAmxgRKOFE8nzZstLG/WAB4Ymjew== + dependencies: + vuvuzela "1.0.3" + +pouchdb-json@7.2.2: + version "7.2.2" + resolved "https://registry.yarnpkg.com/pouchdb-json/-/pouchdb-json-7.2.2.tgz#b939be24b91a7322e9a24b8880a6e21514ec5e1f" + integrity sha512-3b2S2ynN+aoB7aCNyDZc/4c0IAdx/ir3nsHB+/RrKE9cM3QkQYbnnE3r/RvOD1Xvr6ji/KOCBie+Pz/6sxoaug== + dependencies: + vuvuzela "1.0.3" + +pouchdb-mapreduce-utils@7.2.2: + version "7.2.2" + resolved "https://registry.yarnpkg.com/pouchdb-mapreduce-utils/-/pouchdb-mapreduce-utils-7.2.2.tgz#13a46a3cc2a3f3b8e24861da26966904f2963146" + integrity sha512-rAllb73hIkU8rU2LJNbzlcj91KuulpwQu804/F6xF3fhZKC/4JQMClahk+N/+VATkpmLxp1zWmvmgdlwVU4HtQ== + dependencies: + argsarray "0.0.1" + inherits "2.0.4" + 
pouchdb-collections "7.2.2" + pouchdb-utils "7.2.2" + +pouchdb-md5@7.0.0: + version "7.0.0" + resolved "https://registry.yarnpkg.com/pouchdb-md5/-/pouchdb-md5-7.0.0.tgz#935dc6bb507a5f3978fb653ca5790331bae67c96" + integrity sha512-yaSJKhLA3QlgloKUQeb2hLdT3KmUmPfoYdryfwHZuPTpXIRKTnMQTR9qCIRUszc0ruBpDe53DRslCgNUhAyTNQ== + dependencies: + pouchdb-binary-utils "7.0.0" + spark-md5 "3.0.0" + +pouchdb-md5@7.2.2: + version "7.2.2" + resolved "https://registry.yarnpkg.com/pouchdb-md5/-/pouchdb-md5-7.2.2.tgz#415401acc5a844112d765bd1fb4e5d9f38fb0838" + integrity sha512-c/RvLp2oSh8PLAWU5vFBnp6ejJABIdKqboZwRRUrWcfGDf+oyX8RgmJFlYlzMMOh4XQLUT1IoaDV8cwlsuryZw== + dependencies: + pouchdb-binary-utils "7.2.2" + spark-md5 "3.0.1" + +pouchdb-merge@7.0.0: + version "7.0.0" + resolved "https://registry.yarnpkg.com/pouchdb-merge/-/pouchdb-merge-7.0.0.tgz#9f476ce7e32aae56904ad770ae8a1dfe14b57547" + integrity sha512-tci5u6NpznQhGcPv4ho1h0miky9rs+ds/T9zQ9meQeDZbUojXNaX1Jxsb0uYEQQ+HMqdcQs3Akdl0/u0mgwPGg== + +pouchdb-merge@7.2.2: + version "7.2.2" + resolved "https://registry.yarnpkg.com/pouchdb-merge/-/pouchdb-merge-7.2.2.tgz#940d85a2b532d6a93a6cab4b250f5648511bcc16" + integrity sha512-6yzKJfjIchBaS7Tusuk8280WJdESzFfQ0sb4jeMUNnrqs4Cx3b0DIEOYTRRD9EJDM+je7D3AZZ4AT0tFw8gb4A== + +pouchdb-selector-core@7.2.2: + version "7.2.2" + resolved "https://registry.yarnpkg.com/pouchdb-selector-core/-/pouchdb-selector-core-7.2.2.tgz#264d7436a8c8ac3801f39960e79875ef7f3879a0" + integrity sha512-XYKCNv9oiNmSXV5+CgR9pkEkTFqxQGWplnVhO3W9P154H08lU0ZoNH02+uf+NjZ2kjse7Q1fxV4r401LEcGMMg== + dependencies: + pouchdb-collate "7.2.2" + pouchdb-utils "7.2.2" + +pouchdb-utils@7.0.0: + version "7.0.0" + resolved "https://registry.yarnpkg.com/pouchdb-utils/-/pouchdb-utils-7.0.0.tgz#48bfced6665b8f5a2b2d2317e2aa57635ed1e88e" + integrity sha512-1bnoX1KdZYHv9wicDIFdO0PLiVIMzNDUBUZ/yOJZ+6LW6niQCB8aCv09ZztmKfSQcU5nnN3fe656tScBgP6dOQ== + dependencies: + argsarray "0.0.1" + clone-buffer "1.0.0" + immediate "3.0.6" + inherits "2.0.3" + pouchdb-collections "7.0.0" + pouchdb-errors "7.0.0" + pouchdb-md5 "7.0.0" + uuid "3.2.1" + +pouchdb-utils@7.2.2: + version "7.2.2" + resolved "https://registry.yarnpkg.com/pouchdb-utils/-/pouchdb-utils-7.2.2.tgz#c17c4788f1d052b0daf4ef8797bbc4aaa3945aa4" + integrity sha512-XmeM5ioB4KCfyB2MGZXu1Bb2xkElNwF1qG+zVFbQsKQij0zvepdOUfGuWvLRHxTOmt4muIuSOmWZObZa3NOgzQ== + dependencies: + argsarray "0.0.1" + clone-buffer "1.0.0" + immediate "3.3.0" + inherits "2.0.4" + pouchdb-collections "7.2.2" + pouchdb-errors "7.2.2" + pouchdb-md5 "7.2.2" + uuid "8.1.0" + +pouchdb@7.1.1: + version "7.1.1" + resolved "https://registry.yarnpkg.com/pouchdb/-/pouchdb-7.1.1.tgz#f5f8dcd1fc440fb76651cb26f6fc5d97a39cd6ce" + integrity sha512-8bXWclixNJZqokvxGHRsG19zehSJiaZaz4dVYlhXhhUctz7gMcNTElHjPBzBdZlKKvt9aFDndmXN1VVE53Co8g== + dependencies: + argsarray "0.0.1" + buffer-from "1.1.0" + clone-buffer "1.0.0" + double-ended-queue "2.1.0-0" + fetch-cookie "0.7.0" + immediate "3.0.6" + inherits "2.0.3" + level "5.0.1" + level-codec "9.0.1" + level-write-stream "1.0.0" + leveldown "5.0.2" + levelup "4.0.2" + ltgt "2.2.1" + node-fetch "2.4.1" + readable-stream "1.0.33" + spark-md5 "3.0.0" + through2 "3.0.1" + uuid "3.2.1" + vuvuzela "1.0.3" + +precond@0.2: + version "0.2.3" + resolved "https://registry.yarnpkg.com/precond/-/precond-0.2.3.tgz#aa9591bcaa24923f1e0f4849d240f47efc1075ac" + integrity sha1-qpWRvKokkj8eD0hJ0kD0fvwQdaw= + +prelude-ls@~1.1.2: + version "1.1.2" + resolved 
"https://registry.yarnpkg.com/prelude-ls/-/prelude-ls-1.1.2.tgz#21932a549f5e52ffd9a827f570e04be62a97da54" + integrity sha1-IZMqVJ9eUv/ZqCf1cOBL5iqX2lQ= + +prepend-http@^1.0.1: + version "1.0.4" + resolved "https://registry.yarnpkg.com/prepend-http/-/prepend-http-1.0.4.tgz#d4f4562b0ce3696e41ac52d0e002e57a635dc6dc" + integrity sha1-1PRWKwzjaW5BrFLQ4ALlemNdxtw= + +prepend-http@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/prepend-http/-/prepend-http-2.0.0.tgz#e92434bfa5ea8c19f41cdfd401d741a3c819d897" + integrity sha1-6SQ0v6XqjBn0HN/UAddBo8gZ2Jc= + +preserve@^0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/preserve/-/preserve-0.2.0.tgz#815ed1f6ebc65926f865b310c0713bcb3315ce4b" + integrity sha1-gV7R9uvGWSb4ZbMQwHE7yzMVzks= + +prettier@1.19.1: + version "1.19.1" + resolved "https://registry.yarnpkg.com/prettier/-/prettier-1.19.1.tgz#f7d7f5ff8a9cd872a7be4ca142095956a60797cb" + integrity sha512-s7PoyDv/II1ObgQunCbB9PdLmUcBZcnWOcxDh7O0N/UwDEsHyqkW+Qh28jW+mVuCdx7gLB0BotYI1Y6uI9iyew== + +private@^0.1.8: + version "0.1.8" + resolved "https://registry.yarnpkg.com/private/-/private-0.1.8.tgz#2381edb3689f7a53d653190060fcf822d2f368ff" + integrity sha512-VvivMrbvd2nKkiG38qjULzlc+4Vx4wm/whI9pQD35YrARNnhxeiRktSOhSukRLFNlzg6Br/cJPet5J/u19r/mg== + +process-nextick-args@~1.0.6: + version "1.0.7" + resolved "https://registry.yarnpkg.com/process-nextick-args/-/process-nextick-args-1.0.7.tgz#150e20b756590ad3f91093f25a4f2ad8bff30ba3" + integrity sha1-FQ4gt1ZZCtP5EJPyWk8q2L/zC6M= + +process-nextick-args@~2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/process-nextick-args/-/process-nextick-args-2.0.1.tgz#7820d9b16120cc55ca9ae7792680ae7dba6d7fe2" + integrity sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag== + +process@^0.11.10: + version "0.11.10" + resolved "https://registry.yarnpkg.com/process/-/process-0.11.10.tgz#7332300e840161bda3e69a1d1d91a7d4bc16f182" + integrity sha1-czIwDoQBYb2j5podHZGn1LwW8YI= + +promise-nodeify@^3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/promise-nodeify/-/promise-nodeify-3.0.1.tgz#f0f5d9720ee9ec71dd2bfa92667be504c10229c2" + integrity sha512-ghsSuzZXJX8iO7WVec2z7GI+Xk/EyiD+JZK7AZKhUqYfpLa/Zs4ylUD+CwwnKlG6G3HnkUPMAi6PO7zeqGKssg== + +promise-to-callback@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/promise-to-callback/-/promise-to-callback-1.0.0.tgz#5d2a749010bfb67d963598fcd3960746a68feef7" + integrity sha1-XSp0kBC/tn2WNZj805YHRqaP7vc= + dependencies: + is-fn "^1.0.0" + set-immediate-shim "^1.0.1" + +promise.allsettled@1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/promise.allsettled/-/promise.allsettled-1.0.2.tgz#d66f78fbb600e83e863d893e98b3d4376a9c47c9" + integrity sha512-UpcYW5S1RaNKT6pd+s9jp9K9rlQge1UXKskec0j6Mmuq7UJCvlS2J2/s/yuPN8ehftf9HXMxWlKiPbGGUzpoRg== + dependencies: + array.prototype.map "^1.0.1" + define-properties "^1.1.3" + es-abstract "^1.17.0-next.1" + function-bind "^1.1.1" + iterate-value "^1.0.0" + +promise@^7.1.1: + version "7.3.1" + resolved "https://registry.yarnpkg.com/promise/-/promise-7.3.1.tgz#064b72602b18f90f29192b8b1bc418ffd1ebd3bf" + integrity sha512-nolQXZ/4L+bP/UGlkfaIujX9BKxGwmQ9OT4mOt5yvy8iK1h3wqTEJCijzGANTCCl9nWjY41juyAn2K3Q1hLLTg== + dependencies: + asap "~2.0.3" + +promise@^8.0.0: + version "8.1.0" + resolved "https://registry.yarnpkg.com/promise/-/promise-8.1.0.tgz#697c25c3dfe7435dd79fcd58c38a135888eaf05e" + integrity 
sha512-W04AqnILOL/sPRXziNicCjSNRruLAuIHEOVBazepu0545DDNGYHz7ar9ZgZ1fMU8/MA4mVxp5rkBWRi6OXIy3Q== + dependencies: + asap "~2.0.6" + +promise@~1.3.0: + version "1.3.0" + resolved "https://registry.yarnpkg.com/promise/-/promise-1.3.0.tgz#e5cc9a4c8278e4664ffedc01c7da84842b040175" + integrity sha512-R9WrbTF3EPkVtWjp7B7umQGVndpsi+rsDAfrR4xAALQpFLa/+2OriecLhawxzvii2gd9+DZFwROWDuUUaqS5yA== + dependencies: + is-promise "~1" + +promisify-es6@^1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/promisify-es6/-/promisify-es6-1.0.3.tgz#b012668c4df3c965ce13daac2b3a4d1726a96346" + integrity sha512-N9iVG+CGJsI4b4ZGazjwLnxErD2d9Pe4DPvvXSxYA9tFNu8ymXME4Qs5HIQ0LMJpNM7zj+m0NlNnNeqFpKzqnA== + +prop-types@^15.7.2: + version "15.7.2" + resolved "https://registry.yarnpkg.com/prop-types/-/prop-types-15.7.2.tgz#52c41e75b8c87e72b9d9360e0206b99dcbffa6c5" + integrity sha512-8QQikdH7//R2vurIJSutZ1smHYTcLpRWEOlHnzcWHmBYrOGUysKwSsrC89BCiFj3CbrfJ/nXFdJepOVrY1GCHQ== + dependencies: + loose-envify "^1.4.0" + object-assign "^4.1.1" + react-is "^16.8.1" + +protocol-buffers-schema@^3.3.1: + version "3.6.0" + resolved "https://registry.yarnpkg.com/protocol-buffers-schema/-/protocol-buffers-schema-3.6.0.tgz#77bc75a48b2ff142c1ad5b5b90c94cd0fa2efd03" + integrity sha512-TdDRD+/QNdrCGCE7v8340QyuXd4kIWIgapsE2+n/SaGiSSbomYl4TjHlvIoCWRpE7wFt02EpB35VVA2ImcBVqw== + +protons@^1.0.1: + version "1.2.1" + resolved "https://registry.yarnpkg.com/protons/-/protons-1.2.1.tgz#5f1e0db8b2139469cd1c3b4e332a4c2d95d0a218" + integrity sha512-2oqDyc/SN+tNcJf8XxrXhYL7sQn2/OMl8mSdD7NVGsWjMEmAbks4eDVnCyf0vAoRbBWyWTEXWk4D8XfuKVl3zg== + dependencies: + buffer "^5.5.0" + protocol-buffers-schema "^3.3.1" + signed-varint "^2.0.1" + varint "^5.0.0" + +proxy-addr@~2.0.7: + version "2.0.7" + resolved "https://registry.yarnpkg.com/proxy-addr/-/proxy-addr-2.0.7.tgz#f19fe69ceab311eeb94b42e70e8c2070f9ba1025" + integrity sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg== + dependencies: + forwarded "0.2.0" + ipaddr.js "1.9.1" + +prr@~1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/prr/-/prr-1.0.1.tgz#d3fc114ba06995a45ec6893f484ceb1d78f5f476" + integrity sha1-0/wRS6BplaRexok/SEzrHXj19HY= + +psl@^1.1.28: + version "1.9.0" + resolved "https://registry.yarnpkg.com/psl/-/psl-1.9.0.tgz#d0df2a137f00794565fcaf3b2c00cd09f8d5a5a7" + integrity sha512-E/ZsdU4HLs/68gYzgGTkMicWTLPdAftJLfJFlLUAAKZGkStNU72sZjT66SnMDVOfOWY/YAoiD7Jxa9iHvngcag== + +psl@^1.1.33: + version "1.8.0" + resolved "https://registry.yarnpkg.com/psl/-/psl-1.8.0.tgz#9326f8bcfb013adcc005fdff056acce020e51c24" + integrity sha512-RIdOzyoavK+hA18OGGWDqUTsCLhtA7IcZ/6NCs4fFJaHBDab+pDDmDIByWFRQJq2Cd7r1OoQxBGKOaztq+hjIQ== + +public-encrypt@^4.0.0: + version "4.0.3" + resolved "https://registry.yarnpkg.com/public-encrypt/-/public-encrypt-4.0.3.tgz#4fcc9d77a07e48ba7527e7cbe0de33d0701331e0" + integrity sha512-zVpa8oKZSz5bTMTFClc1fQOnyyEzpl5ozpi1B5YcvBrdohMjH2rfsBtyXcuNuwjsDIXmBYlF2N5FlJYhR29t8Q== + dependencies: + bn.js "^4.1.0" + browserify-rsa "^4.0.0" + create-hash "^1.1.0" + parse-asn1 "^5.0.0" + randombytes "^2.0.1" + safe-buffer "^5.1.2" + +pull-defer@~0.2.3: + version "0.2.3" + resolved "https://registry.yarnpkg.com/pull-defer/-/pull-defer-0.2.3.tgz#4ee09c6d9e227bede9938db80391c3dac489d113" + integrity sha512-/An3KE7mVjZCqNhZsr22k1Tx8MACnUnHZZNPSJ0S62td8JtYr/AiRG42Vz7Syu31SoTLUzVIe61jtT/pNdjVYA== + +pull-stream@^3.2.3, pull-stream@^3.6.9: + version "3.6.14" + resolved 
"https://registry.yarnpkg.com/pull-stream/-/pull-stream-3.6.14.tgz#529dbd5b86131f4a5ed636fdf7f6af00781357ee" + integrity sha512-KIqdvpqHHaTUA2mCYcLG1ibEbu/LCKoJZsBWyv9lSYtPkJPBq8m3Hxa103xHi6D2thj5YXa0TqK3L3GUkwgnew== + +pull-to-stream@~0.1.1: + version "0.1.1" + resolved "https://registry.yarnpkg.com/pull-to-stream/-/pull-to-stream-0.1.1.tgz#fa2058528528e3542b81d6f17cbc42288508ff37" + integrity sha512-thZkMv6F9PILt9zdvpI2gxs19mkDrlixYKX6cOBxAW16i1NZH+yLAmF4r8QfJ69zuQh27e01JZP9y27tsH021w== + dependencies: + readable-stream "^3.1.1" + +pump@^1.0.0: + version "1.0.3" + resolved "https://registry.yarnpkg.com/pump/-/pump-1.0.3.tgz#5dfe8311c33bbf6fc18261f9f34702c47c08a954" + integrity sha512-8k0JupWme55+9tCVE+FS5ULT3K6AbgqrGa58lTT49RpyfwwcGedHqaC5LlQNdEAumn/wFsu6aPwkuPMioy8kqw== + dependencies: + end-of-stream "^1.1.0" + once "^1.3.1" + +pump@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/pump/-/pump-3.0.0.tgz#b4a2116815bde2f4e1ea602354e8c75565107a64" + integrity sha512-LwZy+p3SFs1Pytd/jYct4wpv49HiYCqd9Rlc5ZVdk0V+8Yzv6jR5Blk3TRmPL1ft69TxP0IMZGJ+WPFU2BFhww== + dependencies: + end-of-stream "^1.1.0" + once "^1.3.1" + +punycode@2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/punycode/-/punycode-2.1.0.tgz#5f863edc89b96db09074bad7947bf09056ca4e7d" + integrity sha1-X4Y+3Im5bbCQdLrXlHvwkFbKTn0= + +punycode@^2.1.0, punycode@^2.1.1: + version "2.1.1" + resolved "https://registry.yarnpkg.com/punycode/-/punycode-2.1.1.tgz#b58b010ac40c22c5657616c8d2c2c02c7bf479ec" + integrity sha512-XRsRjdf+j5ml+y/6GKHPZbrF/8p2Yga0JPtdqTIY2Xe5ohJPD9saDJJLPvp9+NSBprVvevdXZybnj2cv8OEd0A== + +pure-rand@^4.1.1: + version "4.1.2" + resolved "https://registry.yarnpkg.com/pure-rand/-/pure-rand-4.1.2.tgz#cbad2a3e3ea6df0a8d80d8ba204779b5679a5205" + integrity sha512-uLzZpQWfroIqyFWmX/pl0OL2JHJdoU3dbh0dvZ25fChHFJJi56J5oQZhW6QgbT2Llwh1upki84LnTwlZvsungA== + +qs@6.11.0, qs@^6.4.0, qs@^6.5.2: + version "6.11.0" + resolved "https://registry.yarnpkg.com/qs/-/qs-6.11.0.tgz#fd0d963446f7a65e1367e01abd85429453f0c37a" + integrity sha512-MvjoMCJwEarSbUYk5O+nmoSzSutSsTwF85zcHPQ9OrlFoZOYIjaqBAJIqIXjptyD5vThxGq52Xu/MaJzRkIk4Q== + dependencies: + side-channel "^1.0.4" + +qs@~6.5.2: + version "6.5.3" + resolved "https://registry.yarnpkg.com/qs/-/qs-6.5.3.tgz#3aeeffc91967ef6e35c0e488ef46fb296ab76aad" + integrity sha512-qxXIEh4pCGfHICj1mAJQ2/2XVZkjCDTcEgfoSQxc/fYivUZxTkk7L3bDBJSoNrEzXI17oUO5Dp07ktqE5KzczA== + +query-string@^5.0.1: + version "5.1.1" + resolved "https://registry.yarnpkg.com/query-string/-/query-string-5.1.1.tgz#a78c012b71c17e05f2e3fa2319dd330682efb3cb" + integrity sha512-gjWOsm2SoGlgLEdAGt7a6slVOk9mGiXmPFMqrEhLQ68rhQuBnpfs3+EmlvqKyxnCo9/PPlF+9MtY02S1aFg+Jw== + dependencies: + decode-uri-component "^0.2.0" + object-assign "^4.1.0" + strict-uri-encode "^1.0.0" + +querystring@0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/querystring/-/querystring-0.2.0.tgz#b209849203bb25df820da756e747005878521620" + integrity sha1-sgmEkgO7Jd+CDadW50cAWHhSFiA= + +querystring@^0.2.0: + version "0.2.1" + resolved "https://registry.yarnpkg.com/querystring/-/querystring-0.2.1.tgz#40d77615bb09d16902a85c3e38aa8b5ed761c2dd" + integrity sha512-wkvS7mL/JMugcup3/rMitHmd9ecIGd2lhFhK9N3UUQ450h66d1r3Y9nvXzQAW1Lq+wyx61k/1pfKS5KuKiyEbg== + +querystringify@^2.1.1: + version "2.2.0" + resolved "https://registry.yarnpkg.com/querystringify/-/querystringify-2.2.0.tgz#3345941b4153cb9d082d8eee4cda2016a9aef7f6" + integrity sha512-FIqgj2EUvTa7R50u0rGsyTftzjYmv/a3hO345bZNrqabNqjtgiDMgmo4mkUjd+nzU5oF3dClKqFIPUKybUyqoQ== + 
+queue-microtask@^1.2.2: + version "1.2.2" + resolved "https://registry.yarnpkg.com/queue-microtask/-/queue-microtask-1.2.2.tgz#abf64491e6ecf0f38a6502403d4cda04f372dfd3" + integrity sha512-dB15eXv3p2jDlbOiNLyMabYg1/sXvppd8DP2J3EOCQ0AkuSXCW2tP7mnVouVLJKgUMY6yP0kcQDVpLCN13h4Xg== + +ramda@^0.24.1: + version "0.24.1" + resolved "https://registry.yarnpkg.com/ramda/-/ramda-0.24.1.tgz#c3b7755197f35b8dc3502228262c4c91ddb6b857" + integrity sha512-HEm619G8PaZMfkqCa23qiOe7r3R0brPu7ZgOsgKUsnvLhd0qhc/vTjkUovomgPWa5ECBa08fJZixth9LaoBo5w== + +ramda@^0.25.0: + version "0.25.0" + resolved "https://registry.yarnpkg.com/ramda/-/ramda-0.25.0.tgz#8fdf68231cffa90bc2f9460390a0cb74a29b29a9" + integrity sha512-GXpfrYVPwx3K7RQ6aYT8KPS8XViSXUVJT1ONhoKPE9VAleW42YE+U+8VEyGWt41EnEQW7gwecYJriTI0pKoecQ== + +ramdasauce@^2.1.0: + version "2.1.3" + resolved "https://registry.yarnpkg.com/ramdasauce/-/ramdasauce-2.1.3.tgz#acb45ecc7e4fc4d6f39e19989b4a16dff383e9c2" + integrity sha512-Ml3CPim4SKwmg5g9UI77lnRSeKr/kQw7YhQ6rfdMcBYy6DMlwmkEwQqjygJ3OhxPR+NfFfpjKl3Tf8GXckaqqg== + dependencies: + ramda "^0.24.1" + +randomatic@^3.0.0: + version "3.1.1" + resolved "https://registry.yarnpkg.com/randomatic/-/randomatic-3.1.1.tgz#b776efc59375984e36c537b2f51a1f0aff0da1ed" + integrity sha512-TuDE5KxZ0J461RVjrJZCJc+J+zCkTb1MbH9AQUq68sMhOMcy9jLcb3BrZKgp9q9Ncltdg4QVqWrH02W2EFFVYw== + dependencies: + is-number "^4.0.0" + kind-of "^6.0.0" + math-random "^1.0.1" + +randombytes@^2.0.0, randombytes@^2.0.1, randombytes@^2.0.5, randombytes@^2.0.6, randombytes@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/randombytes/-/randombytes-2.1.0.tgz#df6f84372f0270dc65cdf6291349ab7a473d4f2a" + integrity sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ== + dependencies: + safe-buffer "^5.1.0" + +randomfill@^1.0.3: + version "1.0.4" + resolved "https://registry.yarnpkg.com/randomfill/-/randomfill-1.0.4.tgz#c92196fc86ab42be983f1bf31778224931d61458" + integrity sha512-87lcbR8+MhcWcUiQ+9e+Rwx8MyR2P7qnt15ynUlbm3TU/fjbgz4GsvfSUDTemtCCtVCqb4ZcEFlyPNTh9bBTLw== + dependencies: + randombytes "^2.0.5" + safe-buffer "^5.1.0" + +range-parser@~1.2.1: + version "1.2.1" + resolved "https://registry.yarnpkg.com/range-parser/-/range-parser-1.2.1.tgz#3cf37023d199e1c24d1a55b84800c2f3e6468031" + integrity sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg== + +raw-body@2.5.1: + version "2.5.1" + resolved "https://registry.yarnpkg.com/raw-body/-/raw-body-2.5.1.tgz#fe1b1628b181b700215e5fd42389f98b71392857" + integrity sha512-qqJBtEyVgS0ZmPGdCFPWJ3FreoqvG4MVQln/kCgF7Olq95IbOp0/BWyMwbdtn4VTvkM8Y7khCQ2Xgk/tcrCXig== + dependencies: + bytes "3.1.2" + http-errors "2.0.0" + iconv-lite "0.4.24" + unpipe "1.0.0" + +rc@^1.2.7: + version "1.2.8" + resolved "https://registry.yarnpkg.com/rc/-/rc-1.2.8.tgz#cd924bf5200a075b83c188cd6b9e211b7fc0d3ed" + integrity sha512-y3bGgqKj3QBdxLbLkomlohkvsA8gdAiUQlSBJnBhfn+BPxg4bc62d8TcBW15wavDfgexCgccckhcZvywyQYPOw== + dependencies: + deep-extend "^0.6.0" + ini "~1.3.0" + minimist "^1.2.0" + strip-json-comments "~2.0.1" + +react-is@^16.7.0, react-is@^16.8.1: + version "16.13.1" + resolved "https://registry.yarnpkg.com/react-is/-/react-is-16.13.1.tgz#789729a4dc36de2999dc156dd6c1d9c18cea56a4" + integrity sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ== + +read-pkg-up@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/read-pkg-up/-/read-pkg-up-1.0.1.tgz#9d63c13276c065918d57f002a57f40a1b643fb02" + 
integrity sha1-nWPBMnbAZZGNV/ACpX9AobZD+wI= + dependencies: + find-up "^1.0.0" + read-pkg "^1.0.0" + +read-pkg@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/read-pkg/-/read-pkg-1.1.0.tgz#f5ffaa5ecd29cb31c0474bca7d756b6bb29e3f28" + integrity sha1-9f+qXs0pyzHAR0vKfXVra7KePyg= + dependencies: + load-json-file "^1.0.0" + normalize-package-data "^2.3.2" + path-type "^1.0.0" + +readable-stream@1.0.33: + version "1.0.33" + resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-1.0.33.tgz#3a360dd66c1b1d7fd4705389860eda1d0f61126c" + integrity sha1-OjYN1mwbHX/UcFOJhg7aHQ9hEmw= + dependencies: + core-util-is "~1.0.0" + inherits "~2.0.1" + isarray "0.0.1" + string_decoder "~0.10.x" + +readable-stream@1.1: + version "1.1.13" + resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-1.1.13.tgz#f6eef764f514c89e2b9e23146a75ba106756d23e" + integrity sha1-9u73ZPUUyJ4rniMUanW6EGdW0j4= + dependencies: + core-util-is "~1.0.0" + inherits "~2.0.1" + isarray "0.0.1" + string_decoder "~0.10.x" + +readable-stream@1.1.14, readable-stream@^1.0.33: + version "1.1.14" + resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-1.1.14.tgz#7cf4c54ef648e3813084c636dd2079e166c081d9" + integrity sha1-fPTFTvZI44EwhMY23SB54WbAgdk= + dependencies: + core-util-is "~1.0.0" + inherits "~2.0.1" + isarray "0.0.1" + string_decoder "~0.10.x" + +"readable-stream@2 || 3", readable-stream@^3.0.0, readable-stream@^3.0.1, readable-stream@^3.0.2, readable-stream@^3.1.1, readable-stream@^3.4.0, readable-stream@^3.6.0: + version "3.6.0" + resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-3.6.0.tgz#337bbda3adc0706bd3e024426a286d4b4b2c9198" + integrity sha512-BViHy7LKeTz4oNnkcLJ+lVSL6vpiFeX6/d3oSH8zCW7UxP2onchk+vTGB143xuFjHS3deTgkKoXXymXqymiIdA== + dependencies: + inherits "^2.0.3" + string_decoder "^1.1.1" + util-deprecate "^1.0.1" + +"readable-stream@>=1.0.33-1 <1.1.0-0", readable-stream@~1.0.15, readable-stream@~1.0.26-4: + version "1.0.34" + resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-1.0.34.tgz#125820e34bc842d2f2aaafafe4c2916ee32c157c" + integrity sha1-Elgg40vIQtLyqq+v5MKRbuMsFXw= + dependencies: + core-util-is "~1.0.0" + inherits "~2.0.1" + isarray "0.0.1" + string_decoder "~0.10.x" + +readable-stream@^2.0.0, readable-stream@^2.0.1, readable-stream@^2.0.4, readable-stream@^2.0.5, readable-stream@^2.0.6, readable-stream@^2.2.2, readable-stream@^2.2.9, readable-stream@^2.3.0, readable-stream@^2.3.5, readable-stream@~2.3.6: + version "2.3.7" + resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-2.3.7.tgz#1eca1cf711aef814c04f62252a36a62f6cb23b57" + integrity sha512-Ebho8K4jIbHAxnuxi7o42OrZgF/ZTNcsZj6nRKyUmkhLFq8CHItp/fy6hQZuZmP/n3yZ9VBUbp4zz/mX8hmYPw== + dependencies: + core-util-is "~1.0.0" + inherits "~2.0.3" + isarray "~1.0.0" + process-nextick-args "~2.0.0" + safe-buffer "~5.1.1" + string_decoder "~1.1.1" + util-deprecate "~1.0.1" + +readable-stream@~0.0.2: + version "0.0.4" + resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-0.0.4.tgz#f32d76e3fb863344a548d79923007173665b3b8d" + integrity sha1-8y124/uGM0SlSNeZIwBxc2ZbO40= + +readable-stream@~2.0.0: + version "2.0.6" + resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-2.0.6.tgz#8f90341e68a53ccc928788dacfcd11b36eb9b78e" + integrity sha1-j5A0HmilPMySh4jaz80Rs265t44= + dependencies: + core-util-is "~1.0.0" + inherits "~2.0.1" + isarray "~1.0.0" + process-nextick-args "~1.0.6" + string_decoder "~0.10.x" + 
util-deprecate "~1.0.1" + +readdirp@~3.4.0: + version "3.4.0" + resolved "https://registry.yarnpkg.com/readdirp/-/readdirp-3.4.0.tgz#9fdccdf9e9155805449221ac645e8303ab5b9ada" + integrity sha512-0xe001vZBnJEK+uKcj8qOhyAKPzIT+gStxWr3LCB0DwcXR5NZJ3IaC+yGnHCYzB/S7ov3m3EEbZI2zeNvX+hGQ== + dependencies: + picomatch "^2.2.1" + +readdirp@~3.5.0: + version "3.5.0" + resolved "https://registry.yarnpkg.com/readdirp/-/readdirp-3.5.0.tgz#9ba74c019b15d365278d2e91bb8c48d7b4d42c9e" + integrity sha512-cMhu7c/8rdhkHXWsY+osBhfSy0JikwpHK/5+imo+LpeasTF8ouErHrlYkwT0++njiyuDvc7OFY5T3ukvZ8qmFQ== + dependencies: + picomatch "^2.2.1" + +redux-cli-logger@^2.0.1: + version "2.1.0" + resolved "https://registry.yarnpkg.com/redux-cli-logger/-/redux-cli-logger-2.1.0.tgz#7e546502a4b08c7fac4fe2faee2326a6326cb4a1" + integrity sha512-75mVsggAJRSykWy2qxdGI7osocDWvc3RCMeN93hlvS/FxgdRww12NaXslez+W6gBOrSJKO7W16V0IzuISSfCxg== + dependencies: + colors "^1.1.2" + +redux-devtools-core@^0.2.1: + version "0.2.1" + resolved "https://registry.yarnpkg.com/redux-devtools-core/-/redux-devtools-core-0.2.1.tgz#4e43cbe590a1f18c13ee165d2d42e0bc77a164d8" + integrity sha512-RAGOxtUFdr/1USAvxrWd+Gq/Euzgw7quCZlO5TgFpDfG7rB5tMhZUrNyBjpzgzL2yMk0eHnPYIGm7NkIfRzHxQ== + dependencies: + get-params "^0.1.2" + jsan "^3.1.13" + lodash "^4.17.11" + nanoid "^2.0.0" + remotedev-serialize "^0.1.8" + +redux-devtools-instrument@^1.9.4: + version "1.10.0" + resolved "https://registry.yarnpkg.com/redux-devtools-instrument/-/redux-devtools-instrument-1.10.0.tgz#036caf79fa1e5f25ec4bae38a9af4f08c69e323a" + integrity sha512-X8JRBCzX2ADSMp+iiV7YQ8uoTNyEm0VPFPd4T854coz6lvRiBrFSqAr9YAS2n8Kzxx8CJQotR0QF9wsMM+3DvA== + dependencies: + lodash "^4.17.19" + symbol-observable "^1.2.0" + +redux-saga@1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/redux-saga/-/redux-saga-1.0.0.tgz#acb8b3ed9180fecbe75f342011d75af3ac11045b" + integrity sha512-GvJWs/SzMvEQgeaw6sRMXnS2FghlvEGsHiEtTLpJqc/FHF3I5EE/B+Hq5lyHZ8LSoT2r/X/46uWvkdCnK9WgHA== + dependencies: + "@redux-saga/core" "^1.0.0" + +redux@^3.7.2: + version "3.7.2" + resolved "https://registry.yarnpkg.com/redux/-/redux-3.7.2.tgz#06b73123215901d25d065be342eb026bc1c8537b" + integrity sha512-pNqnf9q1hI5HHZRBkj3bAngGZW/JMCmexDlOxw4XagXY2o1327nHH54LoTjiPJ0gizoqPDRqWyX/00g0hD6w+A== + dependencies: + lodash "^4.2.1" + lodash-es "^4.2.1" + loose-envify "^1.1.0" + symbol-observable "^1.0.3" + +redux@^4.0.4: + version "4.0.5" + resolved "https://registry.yarnpkg.com/redux/-/redux-4.0.5.tgz#4db5de5816e17891de8a80c424232d06f051d93f" + integrity sha512-VSz1uMAH24DM6MF72vcojpYPtrTUu3ByVWfPL1nPfVRb5mZVTve5GnNCUV53QM/BZ66xfWrm0CTWoM+Xlz8V1w== + dependencies: + loose-envify "^1.4.0" + symbol-observable "^1.2.0" + +regenerator-runtime@^0.10.5: + version "0.10.5" + resolved "https://registry.yarnpkg.com/regenerator-runtime/-/regenerator-runtime-0.10.5.tgz#336c3efc1220adcedda2c9fab67b5a7955a33658" + integrity sha1-M2w+/BIgrc7dosn6tntaeVWjNlg= + +regenerator-runtime@^0.11.0: + version "0.11.1" + resolved "https://registry.yarnpkg.com/regenerator-runtime/-/regenerator-runtime-0.11.1.tgz#be05ad7f9bf7d22e056f9726cee5017fbf19e2e9" + integrity sha512-MguG95oij0fC3QV3URf4V2SDYGJhJnJGqvIIgdECeODCT98wSWDAJ94SSuVpYQUoTcGUIL6L4yNB7j1DFFHSBg== + +regenerator-runtime@^0.13.4: + version "0.13.9" + resolved "https://registry.yarnpkg.com/regenerator-runtime/-/regenerator-runtime-0.13.9.tgz#8925742a98ffd90814988d7566ad30ca3b263b52" + integrity sha512-p3VT+cOEgxFsRRA9X4lkI1E+k2/CtnKtU4gcxyaCUreilL/vqI6CdZ3wxVUx3UOUg+gnUOQQcRI7BmSI656MYA== + 
+regex-cache@^0.4.2: + version "0.4.4" + resolved "https://registry.yarnpkg.com/regex-cache/-/regex-cache-0.4.4.tgz#75bdc58a2a1496cec48a12835bc54c8d562336dd" + integrity sha512-nVIZwtCjkC9YgvWkpM55B5rBhBYRZhAaJbgcFYXXsHnbZ9UZI9nnVWYZpBlCqv9ho2eZryPnWrZGsOdPwVWXWQ== + dependencies: + is-equal-shallow "^0.1.3" + +relay-compiler@10.1.0: + version "10.1.0" + resolved "https://registry.yarnpkg.com/relay-compiler/-/relay-compiler-10.1.0.tgz#fb4672cdbe9b54869a3a79759edd8c2d91609cbe" + integrity sha512-HPqc3N3tNgEgUH5+lTr5lnLbgnsZMt+MRiyS0uAVNhuPY2It0X1ZJG+9qdA3L9IqKFUNwVn6zTO7RArjMZbARQ== + dependencies: + "@babel/core" "^7.0.0" + "@babel/generator" "^7.5.0" + "@babel/parser" "^7.0.0" + "@babel/runtime" "^7.0.0" + "@babel/traverse" "^7.0.0" + "@babel/types" "^7.0.0" + babel-preset-fbjs "^3.3.0" + chalk "^4.0.0" + fb-watchman "^2.0.0" + fbjs "^3.0.0" + glob "^7.1.1" + immutable "~3.7.6" + nullthrows "^1.1.1" + relay-runtime "10.1.0" + signedsource "^1.0.0" + yargs "^15.3.1" + +relay-runtime@10.1.0: + version "10.1.0" + resolved "https://registry.yarnpkg.com/relay-runtime/-/relay-runtime-10.1.0.tgz#4753bf36e95e8d862cef33608e3d98b4ed730d16" + integrity sha512-bxznLnQ1ST6APN/cFi7l0FpjbZVchWQjjhj9mAuJBuUqNNCh9uV+UTRhpQF7Q8ycsPp19LHTpVyGhYb0ustuRQ== + dependencies: + "@babel/runtime" "^7.0.0" + fbjs "^3.0.0" + +remote-redux-devtools@^0.5.12: + version "0.5.16" + resolved "https://registry.yarnpkg.com/remote-redux-devtools/-/remote-redux-devtools-0.5.16.tgz#95b1a4a1988147ca04f3368f3573b661748b3717" + integrity sha512-xZ2D1VRIWzat5nsvcraT6fKEX9Cfi+HbQBCwzNnUAM8Uicm/anOc60XGalcaDPrVmLug7nhDl2nimEa3bL3K9w== + dependencies: + jsan "^3.1.13" + querystring "^0.2.0" + redux-devtools-core "^0.2.1" + redux-devtools-instrument "^1.9.4" + rn-host-detect "^1.1.5" + socketcluster-client "^14.2.1" + +remotedev-serialize@^0.1.8: + version "0.1.9" + resolved "https://registry.yarnpkg.com/remotedev-serialize/-/remotedev-serialize-0.1.9.tgz#5e67e05cbca75d408d769d057dc59d0f56cd2c43" + integrity sha512-5tFdZg9mSaAWTv6xmQ7HtHjKMLSFQFExEZOtJe10PLsv1wb7cy7kYHtBvTYRro27/3fRGEcQBRNKSaixOpb69w== + dependencies: + jsan "^3.1.13" + +remove-trailing-separator@^1.0.1: + version "1.1.0" + resolved "https://registry.yarnpkg.com/remove-trailing-separator/-/remove-trailing-separator-1.1.0.tgz#c24bce2a283adad5bc3f58e0d48249b92379d8ef" + integrity sha1-wkvOKig62tW8P1jg1IJJuSN52O8= + +repeat-element@^1.1.2: + version "1.1.3" + resolved "https://registry.yarnpkg.com/repeat-element/-/repeat-element-1.1.3.tgz#782e0d825c0c5a3bb39731f84efee6b742e6b1ce" + integrity sha512-ahGq0ZnV5m5XtZLMb+vP76kcAM5nkLqk0lpqAuojSKGgQtn4eRi4ZZGm2olo2zKFH+sMsWaqOCW1dqAnOru72g== + +repeat-string@^1.5.2: + version "1.6.1" + resolved "https://registry.yarnpkg.com/repeat-string/-/repeat-string-1.6.1.tgz#8dcae470e1c88abc2d600fff4a776286da75e637" + integrity sha1-jcrkcOHIirwtYA//Sndihtp15jc= + +repeating@^2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/repeating/-/repeating-2.0.1.tgz#5214c53a926d3552707527fbab415dbc08d06dda" + integrity sha1-UhTFOpJtNVJwdSf7q0FdvAjQbdo= + dependencies: + is-finite "^1.0.0" + +replace-ext@0.0.1: + version "0.0.1" + resolved "https://registry.yarnpkg.com/replace-ext/-/replace-ext-0.0.1.tgz#29bbd92078a739f0bcce2b4ee41e837953522924" + integrity sha1-KbvZIHinOfC8zitO5B6DeVNSKSQ= + +request@2.88.2, request@^2.55.0, request@^2.79.0, request@^2.85.0: + version "2.88.2" + resolved "https://registry.yarnpkg.com/request/-/request-2.88.2.tgz#d73c918731cb5a87da047e207234146f664d12b3" + integrity 
sha512-MsvtOrfG9ZcrOwAW+Qi+F6HbD0CWXEh9ou77uOb7FM2WPhwT7smM833PzanhJLsgXjN89Ir6V2PczXNnMpwKhw== + dependencies: + aws-sign2 "~0.7.0" + aws4 "^1.8.0" + caseless "~0.12.0" + combined-stream "~1.0.6" + extend "~3.0.2" + forever-agent "~0.6.1" + form-data "~2.3.2" + har-validator "~5.1.3" + http-signature "~1.2.0" + is-typedarray "~1.0.0" + isstream "~0.1.2" + json-stringify-safe "~5.0.1" + mime-types "~2.1.19" + oauth-sign "~0.9.0" + performance-now "^2.1.0" + qs "~6.5.2" + safe-buffer "^5.1.2" + tough-cookie "~2.5.0" + tunnel-agent "^0.6.0" + uuid "^3.3.2" + +require-directory@^2.1.1: + version "2.1.1" + resolved "https://registry.yarnpkg.com/require-directory/-/require-directory-2.1.1.tgz#8c64ad5fd30dab1c976e2344ffe7f792a6a6df42" + integrity sha1-jGStX9MNqxyXbiNE/+f3kqam30I= + +require-from-string@^2.0.0: + version "2.0.2" + resolved "https://registry.yarnpkg.com/require-from-string/-/require-from-string-2.0.2.tgz#89a7fdd938261267318eafe14f9c32e598c36909" + integrity sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw== + +require-main-filename@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/require-main-filename/-/require-main-filename-1.0.1.tgz#97f717b69d48784f5f526a6c5aa8ffdda055a4d1" + integrity sha1-l/cXtp1IeE9fUmpsWqj/3aBVpNE= + +require-main-filename@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/require-main-filename/-/require-main-filename-2.0.0.tgz#d0b329ecc7cc0f61649f62215be69af54aa8989b" + integrity sha512-NKN5kMDylKuldxYLSUfrbo5Tuzh4hd+2E8NPPX02mZtn1VuREQToYe/ZdlJy+J3uCpfaiGF05e7B8W0iXbQHmg== + +requires-port@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/requires-port/-/requires-port-1.0.0.tgz#925d2601d39ac485e091cf0da5c6e694dc3dcaff" + integrity sha1-kl0mAdOaxIXgkc8NpcbmlNw9yv8= + +reselect-tree@^1.3.4: + version "1.3.4" + resolved "https://registry.yarnpkg.com/reselect-tree/-/reselect-tree-1.3.4.tgz#449629728e2dc79bf0602571ec8859ac34737089" + integrity sha512-1OgNq1IStyJFqIqOoD3k3Ge4SsYCMP9W88VQOfvgyLniVKLfvbYO1Vrl92SyEK5021MkoBX6tWb381VxTDyPBQ== + dependencies: + debug "^3.1.0" + esdoc "^1.0.4" + json-pointer "^0.6.0" + reselect "^4.0.0" + source-map-support "^0.5.3" + +reselect@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/reselect/-/reselect-4.0.0.tgz#f2529830e5d3d0e021408b246a206ef4ea4437f7" + integrity sha512-qUgANli03jjAyGlnbYVAV5vvnOmJnODyABz51RdBN7M4WaVu8mecZWgyQNkG8Yqe3KRGRt0l4K4B3XVEULC4CA== + +resolve-from@5.0.0, resolve-from@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/resolve-from/-/resolve-from-5.0.0.tgz#c35225843df8f776df21c57557bc087e9dfdfc69" + integrity sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw== + +resolve-from@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/resolve-from/-/resolve-from-4.0.0.tgz#4abcd852ad32dd7baabfe9b40e00a36db5f392e6" + integrity sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g== + +resolve-url@^0.2.1: + version "0.2.1" + resolved "https://registry.yarnpkg.com/resolve-url/-/resolve-url-0.2.1.tgz#2c637fe77c893afd2a663fe21aa9080068e2052a" + integrity sha1-LGN/53yJOv0qZj/iGqkIAGjiBSo= + +resolve@^1.10.0, resolve@^1.14.2: + version "1.20.0" + resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.20.0.tgz#629a013fb3f70755d6f0b7935cc1c2c5378b1975" + integrity sha512-wENBPt4ySzg4ybFQW2TT1zMQucPK95HSh/nq2CFTZVOGut2+pQvSsgtda4d26YrYcr067wjbmzOG8byDPBX63A== + dependencies: + 
is-core-module "^2.2.0" + path-parse "^1.0.6" + +responselike@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/responselike/-/responselike-1.0.2.tgz#918720ef3b631c5642be068f15ade5a46f4ba1e7" + integrity sha1-kYcg7ztjHFZCvgaPFa3lpG9Loec= + dependencies: + lowercase-keys "^1.0.0" + +restore-cursor@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/restore-cursor/-/restore-cursor-2.0.0.tgz#9f7ee287f82fd326d4fd162923d62129eee0dfaf" + integrity sha1-n37ih/gv0ybU/RYpI9YhKe7g368= + dependencies: + onetime "^2.0.0" + signal-exit "^3.0.2" + +restore-cursor@^3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/restore-cursor/-/restore-cursor-3.1.0.tgz#39f67c54b3a7a58cea5236d95cf0034239631f7e" + integrity sha512-l+sSefzHpj5qimhFSE5a8nufZYAM3sBSVMAPtYkmC+4EH2anSGaEMXSD0izRQbu9nfyQ9y5JrVmp7E8oZrUjvA== + dependencies: + onetime "^5.1.0" + signal-exit "^3.0.2" + +retry@0.13.1: + version "0.13.1" + resolved "https://registry.yarnpkg.com/retry/-/retry-0.13.1.tgz#185b1587acf67919d63b357349e03537b2484658" + integrity sha512-XQBQ3I8W1Cge0Seh+6gjj03LbmRFWuoszgK9ooCpwYIrhhoO80pfq4cUkU5DkknwfOfFteRwlZ56PYOGYyFWdg== + +reusify@^1.0.4: + version "1.0.4" + resolved "https://registry.yarnpkg.com/reusify/-/reusify-1.0.4.tgz#90da382b1e126efc02146e90845a88db12925d76" + integrity sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw== + +rimraf@^2.2.8, rimraf@^2.6.1, rimraf@^2.6.3: + version "2.7.1" + resolved "https://registry.yarnpkg.com/rimraf/-/rimraf-2.7.1.tgz#35797f13a7fdadc566142c29d4f07ccad483e3ec" + integrity sha512-uWjbaKIK3T1OSVptzX7Nl6PvQ3qAGtKEtVRjRuazjfL3Bx5eI409VZSqgND+4UNnmzLVdPj9FqFJNPqBZFve4w== + dependencies: + glob "^7.1.3" + +rimraf@^3.0.0, rimraf@^3.0.2: + version "3.0.2" + resolved "https://registry.yarnpkg.com/rimraf/-/rimraf-3.0.2.tgz#f1a5402ba6220ad52cc1282bac1ae3aa49fd061a" + integrity sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA== + dependencies: + glob "^7.1.3" + +ripemd160@^2.0.0, ripemd160@^2.0.1: + version "2.0.2" + resolved "https://registry.yarnpkg.com/ripemd160/-/ripemd160-2.0.2.tgz#a1c1a6f624751577ba5d07914cbc92850585890c" + integrity sha512-ii4iagi25WusVoiC4B4lq7pbXfAp3D9v5CwfkY33vffw2+pkDjY1D8GaN7spsxvCSx8dkPqOZCEZyfxcmJG2IA== + dependencies: + hash-base "^3.0.0" + inherits "^2.0.1" + +rlp@^2.0.0, rlp@^2.2.3: + version "2.2.6" + resolved "https://registry.yarnpkg.com/rlp/-/rlp-2.2.6.tgz#c80ba6266ac7a483ef1e69e8e2f056656de2fb2c" + integrity sha512-HAfAmL6SDYNWPUOJNrM500x4Thn4PZsEy5pijPh40U9WfNk0z15hUYzO9xVIMAdIHdFtD8CBDHd75Td1g36Mjg== + dependencies: + bn.js "^4.11.1" + +rlp@^2.2.4: + version "2.2.7" + resolved "https://registry.yarnpkg.com/rlp/-/rlp-2.2.7.tgz#33f31c4afac81124ac4b283e2bd4d9720b30beaf" + integrity sha512-d5gdPmgQ0Z+AklL2NVXr/IoSjNZFfTVvQWzL/AM2AOcSzYP2xjlb0AC8YyCLc41MSNf6P6QVtjgPdmVtzb+4lQ== + dependencies: + bn.js "^5.2.0" + +rn-host-detect@^1.1.5: + version "1.2.0" + resolved "https://registry.yarnpkg.com/rn-host-detect/-/rn-host-detect-1.2.0.tgz#8b0396fc05631ec60c1cb8789e5070cdb04d0da0" + integrity sha512-btNg5kzHcjZZ7t7mvvV/4wNJ9e3MPgrWivkRgWURzXL0JJ0pwWlU4zrbmdlz3HHzHOxhBhHB4D+/dbMFfu4/4A== + +rsa-pem-to-jwk@^1.1.3: + version "1.1.3" + resolved "https://registry.yarnpkg.com/rsa-pem-to-jwk/-/rsa-pem-to-jwk-1.1.3.tgz#245e76bdb7e7234cfee7ca032d31b54c38fab98e" + integrity sha512-ZlVavEvTnD8Rzh/pdB8NH4VF5GNEtF6biGQcTtC4GKFMsbZR08oHtOYefbhCN+JnJIuMItiCDCMycdcMrw6blA== + dependencies: + object-assign "^2.0.0" + rsa-unpack "0.0.6" + 
+rsa-unpack@0.0.6: + version "0.0.6" + resolved "https://registry.yarnpkg.com/rsa-unpack/-/rsa-unpack-0.0.6.tgz#f50ebd56a628378e631f297161026ce9ab4eddba" + integrity sha512-HRrl8GHjjPziPFRDJPq/v5OxZ3IPdksV5h3cime/oHgcgM1k1toO5OdtzClgBqRf5dF6IgptOB0g/zFb0w5zQw== + dependencies: + optimist "~0.3.5" + +run-parallel@^1.1.9: + version "1.2.0" + resolved "https://registry.yarnpkg.com/run-parallel/-/run-parallel-1.2.0.tgz#66d1368da7bdf921eb9d95bd1a9229e7f21a43ee" + integrity sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA== + dependencies: + queue-microtask "^1.2.2" + +rustbn.js@~0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/rustbn.js/-/rustbn.js-0.2.0.tgz#8082cb886e707155fd1cb6f23bd591ab8d55d0ca" + integrity sha512-4VlvkRUuCJvr2J6Y0ImW7NvTCriMi7ErOAqWk1y69vAdoNIzCF3yPmgeNzx+RQTLEDFq5sHfscn1MwHxP9hNfA== + +safe-buffer@5.2.1, safe-buffer@^5.0.1, safe-buffer@^5.1.0, safe-buffer@^5.1.1, safe-buffer@^5.1.2, safe-buffer@^5.2.0, safe-buffer@^5.2.1, safe-buffer@~5.2.0: + version "5.2.1" + resolved "https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.2.1.tgz#1eaf9fa9bdb1fdd4ec75f58f9cdb4e6b7827eec6" + integrity sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ== + +safe-buffer@~5.1.0, safe-buffer@~5.1.1: + version "5.1.2" + resolved "https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.1.2.tgz#991ec69d296e0313747d59bdfd2b745c35f8828d" + integrity sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g== + +safe-event-emitter@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/safe-event-emitter/-/safe-event-emitter-1.0.1.tgz#5b692ef22329ed8f69fdce607e50ca734f6f20af" + integrity sha512-e1wFe99A91XYYxoQbcq2ZJUWurxEyP8vfz7A7vuUe1s95q8r5ebraVaA1BukYJcpM6V16ugWoD9vngi8Ccu5fg== + dependencies: + events "^3.0.0" + +"safer-buffer@>= 2.1.2 < 3", "safer-buffer@>= 2.1.2 < 3.0.0", safer-buffer@^2.0.2, safer-buffer@^2.1.0, safer-buffer@~2.1.0: + version "2.1.2" + resolved "https://registry.yarnpkg.com/safer-buffer/-/safer-buffer-2.1.2.tgz#44fa161b0187b9549dd84bb91802f9bd8385cd6a" + integrity sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg== + +sax@^1.1.4, sax@^1.2.4: + version "1.2.4" + resolved "https://registry.yarnpkg.com/sax/-/sax-1.2.4.tgz#2816234e2378bddc4e5354fab5caa895df7100d9" + integrity sha512-NqVDv9TpANUjFm0N8uM5GxL36UgKi9/atZw+x7YFnQ8ckwFGKrl4xX4yWtrey3UJm5nP1kUbnYgLopqWNSRhWw== + +sc-channel@^1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/sc-channel/-/sc-channel-1.2.0.tgz#d9209f3a91e3fa694c66b011ce55c4ad8c3087d9" + integrity sha512-M3gdq8PlKg0zWJSisWqAsMmTVxYRTpVRqw4CWAdKBgAfVKumFcTjoCV0hYu7lgUXccCtCD8Wk9VkkE+IXCxmZA== + dependencies: + component-emitter "1.2.1" + +sc-errors@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/sc-errors/-/sc-errors-2.0.1.tgz#3af2d934dfd82116279a4b2c1552c1e021ddcb03" + integrity sha512-JoVhq3Ud+3Ujv2SIG7W0XtjRHsrNgl6iXuHHsh0s+Kdt5NwI6N2EGAZD4iteitdDv68ENBkpjtSvN597/wxPSQ== + +sc-formatter@^3.0.1: + version "3.0.2" + resolved "https://registry.yarnpkg.com/sc-formatter/-/sc-formatter-3.0.2.tgz#9abdb14e71873ce7157714d3002477bbdb33c4e6" + integrity sha512-9PbqYBpCq+OoEeRQ3QfFIGE6qwjjBcd2j7UjgDlhnZbtSnuGgHdcRklPKYGuYFH82V/dwd+AIpu8XvA1zqTd+A== + +scrypt-js@2.0.4: + version "2.0.4" + resolved "https://registry.yarnpkg.com/scrypt-js/-/scrypt-js-2.0.4.tgz#32f8c5149f0797672e551c07e230f834b6af5f16" + integrity 
sha512-4KsaGcPnuhtCZQCxFxN3GVYIhKFPTdLd8PLC552XwbMndtD0cjRFAhDuuydXQ0h08ZfPgzqe6EKHozpuH74iDw== + +scrypt-js@^3.0.0, scrypt-js@^3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/scrypt-js/-/scrypt-js-3.0.1.tgz#d314a57c2aef69d1ad98a138a21fe9eafa9ee312" + integrity sha512-cdwTTnqPu0Hyvf5in5asVdZocVDTNRmR7XEcJuIzMjJeSHybHl7vpB66AzwTaIg6CLSbtjcxc8fqcySfnTkccA== + +secp256k1@^3.6.2: + version "3.8.0" + resolved "https://registry.yarnpkg.com/secp256k1/-/secp256k1-3.8.0.tgz#28f59f4b01dbee9575f56a47034b7d2e3b3b352d" + integrity sha512-k5ke5avRZbtl9Tqx/SA7CbY3NF6Ro+Sj9cZxezFzuBlLDmyqPiL8hJJ+EmzD8Ig4LUDByHJ3/iPOVoRixs/hmw== + dependencies: + bindings "^1.5.0" + bip66 "^1.1.5" + bn.js "^4.11.8" + create-hash "^1.2.0" + drbg.js "^1.0.1" + elliptic "^6.5.2" + nan "^2.14.0" + safe-buffer "^5.1.2" + +secp256k1@^4.0.1: + version "4.0.3" + resolved "https://registry.yarnpkg.com/secp256k1/-/secp256k1-4.0.3.tgz#c4559ecd1b8d3c1827ed2d1b94190d69ce267303" + integrity sha512-NLZVf+ROMxwtEj3Xa562qgv2BK5e2WNmXPiOdVIPLgs6lyTzMvBq0aWTYMI5XCP9jZMVKOcqZLw/Wc4vDkuxhA== + dependencies: + elliptic "^6.5.4" + node-addon-api "^2.0.0" + node-gyp-build "^4.2.0" + +semaphore@>=1.0.1, semaphore@^1.0.3: + version "1.1.0" + resolved "https://registry.yarnpkg.com/semaphore/-/semaphore-1.1.0.tgz#aaad8b86b20fe8e9b32b16dc2ee682a8cd26a8aa" + integrity sha512-O4OZEaNtkMd/K0i6js9SL+gqy0ZCBMgUvlSqHKi4IBdjhe7wB8pwztUk1BbZ1fmrvpwFrPbHzqd2w5pTcJH6LA== + +"semver@2 || 3 || 4 || 5", semver@^5.3.0, semver@^5.5.0: + version "5.7.1" + resolved "https://registry.yarnpkg.com/semver/-/semver-5.7.1.tgz#a954f931aeba508d307bbf069eff0c01c96116f7" + integrity sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ== + +semver@7.0.0: + version "7.0.0" + resolved "https://registry.yarnpkg.com/semver/-/semver-7.0.0.tgz#5f3ca35761e47e05b206c6daff2cf814f0316b8e" + integrity sha512-+GB6zVA9LWh6zovYQLALHwv5rb2PHGlJi3lfiqIHxR0uuwCgefcOJc59v9fv1w8GbStwxuuqqAjI9NMAOOgq1A== + +semver@7.3.5: + version "7.3.5" + resolved "https://registry.yarnpkg.com/semver/-/semver-7.3.5.tgz#0b621c879348d8998e4b0e4be94b3f12e6018ef7" + integrity sha512-PoeGJYh8HK4BTO/a9Tf6ZG3veo/A7ZVsYrSA6J8ny9nb3B1VrpkuN+z9OE5wfE5p6H4LchYZsegiQgbJD94ZFQ== + dependencies: + lru-cache "^6.0.0" + +semver@^6.1.1, semver@^6.1.2, semver@^6.3.0: + version "6.3.0" + resolved "https://registry.yarnpkg.com/semver/-/semver-6.3.0.tgz#ee0a64c8af5e8ceea67687b133761e1becbd1d3d" + integrity sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw== + +semver@^7.0.0: + version "7.3.7" + resolved "https://registry.yarnpkg.com/semver/-/semver-7.3.7.tgz#12c5b649afdbf9049707796e22a4028814ce523f" + integrity sha512-QlYTucUYOews+WeEujDoEGziz4K6c47V/Bd+LjSSYcA94p+DmINdf7ncaUinThfvZyu13lN9OY1XDxt8C0Tw0g== + dependencies: + lru-cache "^6.0.0" + +semver@^7.3.4: + version "7.3.4" + resolved "https://registry.yarnpkg.com/semver/-/semver-7.3.4.tgz#27aaa7d2e4ca76452f98d3add093a72c943edc97" + integrity sha512-tCfb2WLjqFAtXn4KEdxIhalnRtoKFN7nAwj0B3ZXCbQloV2tq5eDbcTmT68JJD3nRJq24/XgxtQKFIpQdtvmVw== + dependencies: + lru-cache "^6.0.0" + +semver@~5.4.1: + version "5.4.1" + resolved "https://registry.yarnpkg.com/semver/-/semver-5.4.1.tgz#e059c09d8571f0540823733433505d3a2f00b18e" + integrity sha512-WfG/X9+oATh81XtllIo/I8gOiY9EXRdv1cQdyykeXK17YcUW3EXUAi2To4pcH6nZtJPr7ZOpM5OMyWJZm+8Rsg== + +send@0.18.0: + version "0.18.0" + resolved "https://registry.yarnpkg.com/send/-/send-0.18.0.tgz#670167cc654b05f5aa4a767f9113bb371bc706be" + integrity 
sha512-qqWzuOjSFOuqPjFe4NOsMLafToQQwBSOEpS+FwEt3A2V3vKubTquT3vmLTQpFgMXp8AlFWFuP1qKaJZOtPpVXg== + dependencies: + debug "2.6.9" + depd "2.0.0" + destroy "1.2.0" + encodeurl "~1.0.2" + escape-html "~1.0.3" + etag "~1.8.1" + fresh "0.5.2" + http-errors "2.0.0" + mime "1.6.0" + ms "2.1.3" + on-finished "2.4.1" + range-parser "~1.2.1" + statuses "2.0.1" + +sentence-case@^2.1.0: + version "2.1.1" + resolved "https://registry.yarnpkg.com/sentence-case/-/sentence-case-2.1.1.tgz#1f6e2dda39c168bf92d13f86d4a918933f667ed4" + integrity sha1-H24t2jnBaL+S0T+G1KkYkz9mftQ= + dependencies: + no-case "^2.2.0" + upper-case-first "^1.1.2" + +serialize-javascript@4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/serialize-javascript/-/serialize-javascript-4.0.0.tgz#b525e1238489a5ecfc42afacc3fe99e666f4b1aa" + integrity sha512-GaNA54380uFefWghODBWEGisLZFj00nS5ACs6yHa9nLqlLpVLO8ChDGeKRjZnV4Nh4n0Qi7nhYZD/9fCPzEqkw== + dependencies: + randombytes "^2.1.0" + +serve-static@1.15.0: + version "1.15.0" + resolved "https://registry.yarnpkg.com/serve-static/-/serve-static-1.15.0.tgz#faaef08cffe0a1a62f60cad0c4e513cff0ac9540" + integrity sha512-XGuRDNjXUijsUL0vl6nSD7cwURuzEgglbOaFuZM9g3kwDXOWVTck0jLzjPzGD+TazWbboZYu52/9/XPdUgne9g== + dependencies: + encodeurl "~1.0.2" + escape-html "~1.0.3" + parseurl "~1.3.3" + send "0.18.0" + +servify@^0.1.12: + version "0.1.12" + resolved "https://registry.yarnpkg.com/servify/-/servify-0.1.12.tgz#142ab7bee1f1d033b66d0707086085b17c06db95" + integrity sha512-/xE6GvsKKqyo1BAY+KxOWXcLpPsUUyji7Qg3bVD7hh1eRze5bR1uYiuDA/k3Gof1s9BTzQZEJK8sNcNGFIzeWw== + dependencies: + body-parser "^1.16.0" + cors "^2.8.1" + express "^4.14.0" + request "^2.79.0" + xhr "^2.3.3" + +set-blocking@^2.0.0, set-blocking@~2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/set-blocking/-/set-blocking-2.0.0.tgz#045f9782d011ae9a6803ddd382b24392b3d890f7" + integrity sha1-BF+XgtARrppoA93TgrJDkrPYkPc= + +set-immediate-shim@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/set-immediate-shim/-/set-immediate-shim-1.0.1.tgz#4b2b1b27eb808a9f8dcc481a58e5e56f599f3f61" + integrity sha1-SysbJ+uAip+NzEgaWOXlb1mfP2E= + +setimmediate@1.0.4: + version "1.0.4" + resolved "https://registry.yarnpkg.com/setimmediate/-/setimmediate-1.0.4.tgz#20e81de622d4a02588ce0c8da8973cbcf1d3138f" + integrity sha1-IOgd5iLUoCWIzgyNqJc8vPHTE48= + +setimmediate@^1.0.5: + version "1.0.5" + resolved "https://registry.yarnpkg.com/setimmediate/-/setimmediate-1.0.5.tgz#290cbb232e306942d7d7ea9b83732ab7856f8285" + integrity sha512-MATJdZp8sLqDl/68LfQmbP8zKPLQNV6BIZoIgrscFDQ+RsvK/BxeDQOgyxKKoh0y/8h3BqVFnCqQ/gd+reiIXA== + +setprototypeof@1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/setprototypeof/-/setprototypeof-1.2.0.tgz#66c9a24a73f9fc28cbe66b09fed3d33dcaf1b424" + integrity sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw== + +sha.js@^2.4.0, sha.js@^2.4.11, sha.js@^2.4.8: + version "2.4.11" + resolved "https://registry.yarnpkg.com/sha.js/-/sha.js-2.4.11.tgz#37a5cf0b81ecbc6943de109ba2960d1b26584ae7" + integrity sha512-QMEp5B7cftE7APOjk5Y6xgrbWu+WkLVQwk8JNjZ8nKRciZaByEW6MubieAiToS7+dwvrjGhH8jRXz3MVd0AYqQ== + dependencies: + inherits "^2.0.1" + safe-buffer "^5.0.1" + +shallowequal@^1.0.2: + version "1.1.0" + resolved "https://registry.yarnpkg.com/shallowequal/-/shallowequal-1.1.0.tgz#188d521de95b9087404fd4dcb68b13df0ae4e7f8" + integrity sha512-y0m1JoUZSlPAjXVtPPW70aZWfIL/dSP7AFkRnniLCrK/8MDKog3TySTBmckD+RObVxH0v4Tox67+F14PdED2oQ== + +shebang-command@^2.0.0: 
+ version "2.0.0" + resolved "https://registry.yarnpkg.com/shebang-command/-/shebang-command-2.0.0.tgz#ccd0af4f8835fbdc265b82461aaf0c36663f34ea" + integrity sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA== + dependencies: + shebang-regex "^3.0.0" + +shebang-regex@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/shebang-regex/-/shebang-regex-3.0.0.tgz#ae16f1644d873ecad843b0307b143362d4c42172" + integrity sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A== + +side-channel@^1.0.4: + version "1.0.4" + resolved "https://registry.yarnpkg.com/side-channel/-/side-channel-1.0.4.tgz#efce5c8fdc104ee751b25c58d4290011fa5ea2cf" + integrity sha512-q5XPytqFEIKHkGdiMIrY10mvLRvnQh42/+GoBlFW3b2LXLE2xxJpZFdm94we0BaoV3RwJyGqg5wS7epxTv0Zvw== + dependencies: + call-bind "^1.0.0" + get-intrinsic "^1.0.2" + object-inspect "^1.9.0" + +signal-exit@^3.0.0: + version "3.0.3" + resolved "https://registry.yarnpkg.com/signal-exit/-/signal-exit-3.0.3.tgz#a1410c2edd8f077b08b4e253c8eacfcaf057461c" + integrity sha512-VUJ49FC8U1OxwZLxIbTTrDvLnf/6TDgxZcK8wxR8zs13xpx7xbG60ndBlhNrFi2EMuFRoeDoJO7wthSLq42EjA== + +signal-exit@^3.0.2: + version "3.0.7" + resolved "https://registry.yarnpkg.com/signal-exit/-/signal-exit-3.0.7.tgz#a9a1767f8af84155114eaabd73f99273c8f59ad9" + integrity sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ== + +signed-varint@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/signed-varint/-/signed-varint-2.0.1.tgz#50a9989da7c98c2c61dad119bc97470ef8528129" + integrity sha512-abgDPg1106vuZZOvw7cFwdCABddfJRz5akcCcchzTbhyhYnsG31y4AlZEgp315T7W3nQq5P4xeOm186ZiPVFzw== + dependencies: + varint "~5.0.0" + +signedsource@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/signedsource/-/signedsource-1.0.0.tgz#1ddace4981798f93bd833973803d80d52e93ad6a" + integrity sha1-HdrOSYF5j5O9gzlzgD2A1S6TrWo= + +simple-concat@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/simple-concat/-/simple-concat-1.0.1.tgz#f46976082ba35c2263f1c8ab5edfe26c41c9552f" + integrity sha512-cSFtAPtRhljv69IK0hTVZQ+OfE9nePi/rtJmw5UjHeVyVroEqJXP1sFztKUy1qU+xvz3u/sfYJLa947b7nAN2Q== + +simple-get@^2.7.0: + version "2.8.2" + resolved "https://registry.yarnpkg.com/simple-get/-/simple-get-2.8.2.tgz#5708fb0919d440657326cd5fe7d2599d07705019" + integrity sha512-Ijd/rV5o+mSBBs4F/x9oDPtTx9Zb6X9brmnXvMW4J7IR15ngi9q5xxqWBKU744jTZiaXtxaPL7uHG6vtN8kUkw== + dependencies: + decompress-response "^3.3.0" + once "^1.3.1" + simple-concat "^1.0.0" + +slash@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/slash/-/slash-1.0.0.tgz#c41f2f6c39fc16d1cd17ad4b5d896114ae470d55" + integrity sha1-xB8vbDn8FtHNF61LXYlhFK5HDVU= + +slash@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/slash/-/slash-3.0.0.tgz#6539be870c165adbd5240220dbe361f1bc4d4634" + integrity sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q== + +snake-case@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/snake-case/-/snake-case-2.1.0.tgz#41bdb1b73f30ec66a04d4e2cad1b76387d4d6d9f" + integrity sha1-Qb2xtz8w7GagTU4srRt2OH1NbZ8= + dependencies: + no-case "^2.2.0" + +socketcluster-client@^14.2.1: + version "14.3.1" + resolved "https://registry.yarnpkg.com/socketcluster-client/-/socketcluster-client-14.3.1.tgz#bfc3591c0cad2668e7b3512a102f3844f5f2e84d" + integrity 
sha512-Sd/T0K/9UlqTfz+HUuFq90dshA5OBJPQbdkRzGtcKIOm52fkdsBTt0FYpiuzzxv5VrU7PWpRm6KIfNXyPwlLpw== + dependencies: + buffer "^5.2.1" + clone "2.1.1" + component-emitter "1.2.1" + linked-list "0.1.0" + querystring "0.2.0" + sc-channel "^1.2.0" + sc-errors "^2.0.1" + sc-formatter "^3.0.1" + uuid "3.2.1" + ws "7.1.0" + +solc@^0.8.2: + version "0.8.2" + resolved "https://registry.yarnpkg.com/solc/-/solc-0.8.2.tgz#6033d75c6166fd0feb7fe08eddc198aaf52025da" + integrity sha512-fMfcAPaePLfsOY82jqONt0RMh5M8m+pK6QtnMGMUFUm8uEDlUmoqnyLxGVFelosJaVfXhygAB+mTlb+HxiV7DQ== + dependencies: + command-exists "^1.2.8" + commander "3.0.2" + follow-redirects "^1.12.1" + fs-extra "^0.30.0" + js-sha3 "0.8.0" + memorystream "^0.3.1" + require-from-string "^2.0.0" + semver "^5.5.0" + tmp "0.0.33" + +source-map-resolve@^0.5.2: + version "0.5.3" + resolved "https://registry.yarnpkg.com/source-map-resolve/-/source-map-resolve-0.5.3.tgz#190866bece7553e1f8f267a2ee82c606b5509a1a" + integrity sha512-Htz+RnsXWk5+P2slx5Jh3Q66vhQj1Cllm0zvnaY98+NFx+Dv2CF/f5O/t8x+KaNdrdIAsruNzoh/KpialbqAnw== + dependencies: + atob "^2.1.2" + decode-uri-component "^0.2.0" + resolve-url "^0.2.1" + source-map-url "^0.4.0" + urix "^0.1.0" + +source-map-support@^0.4.15: + version "0.4.18" + resolved "https://registry.yarnpkg.com/source-map-support/-/source-map-support-0.4.18.tgz#0286a6de8be42641338594e97ccea75f0a2c585f" + integrity sha512-try0/JqxPLF9nOjvSta7tVondkP5dwgyLDjVoyMDlmjugT2lRZ1OfsrYTkCd2hkDnJTKRbO/Rl3orm8vlsUzbA== + dependencies: + source-map "^0.5.6" + +source-map-support@^0.5.19, source-map-support@^0.5.3: + version "0.5.19" + resolved "https://registry.yarnpkg.com/source-map-support/-/source-map-support-0.5.19.tgz#a98b62f86dcaf4f67399648c085291ab9e8fed61" + integrity sha512-Wonm7zOCIJzBGQdB+thsPar0kYuCIzYvxZwlBa87yi/Mdjv7Tip2cyVbLj5o0cFPN4EVkuTwb3GDDyUx2DGnGw== + dependencies: + buffer-from "^1.0.0" + source-map "^0.6.0" + +source-map-url@^0.4.0: + version "0.4.1" + resolved "https://registry.yarnpkg.com/source-map-url/-/source-map-url-0.4.1.tgz#0af66605a745a5a2f91cf1bbf8a7afbc283dec56" + integrity sha512-cPiFOTLUKvJFIg4SKVScy4ilPPW6rFgMgfuZJPNoDuMs3nC1HbMUycBoJw77xFIp6z1UJQJOfx6C9GMH80DiTw== + +source-map@^0.5.0, source-map@^0.5.6, source-map@^0.5.7: + version "0.5.7" + resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.5.7.tgz#8a039d2d1021d22d1ea14c80d8ea468ba2ef3fcc" + integrity sha1-igOdLRAh0i0eoUyA2OpGi6LvP8w= + +source-map@^0.6.0, source-map@^0.6.1, source-map@~0.6.0, source-map@~0.6.1: + version "0.6.1" + resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.6.1.tgz#74722af32e9614e9c287a8d0bbde48b5e2f1a263" + integrity sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g== + +spark-md5@3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/spark-md5/-/spark-md5-3.0.0.tgz#3722227c54e2faf24b1dc6d933cc144e6f71bfef" + integrity sha1-NyIifFTi+vJLHcbZM8wUTm9xv+8= + +spark-md5@3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/spark-md5/-/spark-md5-3.0.1.tgz#83a0e255734f2ab4e5c466e5a2cfc9ba2aa2124d" + integrity sha512-0tF3AGSD1ppQeuffsLDIOWlKUd3lS92tFxcsrh5Pe3ZphhnoK+oXIBTzOAThZCiuINZLvpiLH/1VS1/ANEJVig== + +spdx-correct@^3.0.0: + version "3.1.1" + resolved "https://registry.yarnpkg.com/spdx-correct/-/spdx-correct-3.1.1.tgz#dece81ac9c1e6713e5f7d1b6f17d468fa53d89a9" + integrity sha512-cOYcUWwhCuHCXi49RhFRCyJEK3iPj1Ziz9DpViV3tbZOwXD49QzIN3MpOLJNxh2qwq2lJJZaKMVw9qNi4jTC0w== + dependencies: + spdx-expression-parse "^3.0.0" + spdx-license-ids "^3.0.0" 
+ +spdx-exceptions@^2.1.0: + version "2.3.0" + resolved "https://registry.yarnpkg.com/spdx-exceptions/-/spdx-exceptions-2.3.0.tgz#3f28ce1a77a00372683eade4a433183527a2163d" + integrity sha512-/tTrYOC7PPI1nUAgx34hUpqXuyJG+DTHJTnIULG4rDygi4xu/tfgmq1e1cIRwRzwZgo4NLySi+ricLkZkw4i5A== + +spdx-expression-parse@^3.0.0: + version "3.0.1" + resolved "https://registry.yarnpkg.com/spdx-expression-parse/-/spdx-expression-parse-3.0.1.tgz#cf70f50482eefdc98e3ce0a6833e4a53ceeba679" + integrity sha512-cbqHunsQWnJNE6KhVSMsMeH5H/L9EpymbzqTQ3uLwNCLZ1Q481oWaofqH7nO6V07xlXwY6PhQdQ2IedWx/ZK4Q== + dependencies: + spdx-exceptions "^2.1.0" + spdx-license-ids "^3.0.0" + +spdx-license-ids@^3.0.0: + version "3.0.7" + resolved "https://registry.yarnpkg.com/spdx-license-ids/-/spdx-license-ids-3.0.7.tgz#e9c18a410e5ed7e12442a549fbd8afa767038d65" + integrity sha512-U+MTEOO0AiDzxwFvoa4JVnMV6mZlJKk2sBLt90s7G0Gd0Mlknc7kxEn3nuDPNZRta7O2uy8oLcZLVT+4sqNZHQ== + +split-ca@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/split-ca/-/split-ca-1.0.1.tgz#6c83aff3692fa61256e0cd197e05e9de157691a6" + integrity sha512-Q5thBSxp5t8WPTTJQS59LrGqOZqOsrhDGDVm8azCqIBjSBd7nd9o2PM+mDulQQkh8h//4U6hFZnc/mul8t5pWQ== + +split2@^3.1.0: + version "3.2.2" + resolved "https://registry.yarnpkg.com/split2/-/split2-3.2.2.tgz#bf2cf2a37d838312c249c89206fd7a17dd12365f" + integrity sha512-9NThjpgZnifTkJpzTZ7Eue85S49QwpNhZTq6GRJwObb6jnLFNGB7Qm73V5HewTROPyxD0C29xqmaI68bQtV+hg== + dependencies: + readable-stream "^3.0.0" + +sprintf-js@~1.0.2: + version "1.0.3" + resolved "https://registry.yarnpkg.com/sprintf-js/-/sprintf-js-1.0.3.tgz#04e6926f662895354f3dd015203633b857297e2c" + integrity sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g== + +sqlite3@^4.0.0: + version "4.2.0" + resolved "https://registry.yarnpkg.com/sqlite3/-/sqlite3-4.2.0.tgz#49026d665e9fc4f922e56fb9711ba5b4c85c4901" + integrity sha512-roEOz41hxui2Q7uYnWsjMOTry6TcNUNmp8audCx18gF10P2NknwdpF+E+HKvz/F2NvPKGGBF4NGc+ZPQ+AABwg== + dependencies: + nan "^2.12.1" + node-pre-gyp "^0.11.0" + +sse-z@0.3.0: + version "0.3.0" + resolved "https://registry.yarnpkg.com/sse-z/-/sse-z-0.3.0.tgz#e215db7c303d6c4a4199d80cb63811cc28fa55b9" + integrity sha512-jfcXynl9oAOS9YJ7iqS2JMUEHOlvrRAD+54CENiWnc4xsuVLQVSgmwf7cwOTcBd/uq3XkQKBGojgvEtVXcJ/8w== + +sshpk@^1.7.0: + version "1.17.0" + resolved "https://registry.yarnpkg.com/sshpk/-/sshpk-1.17.0.tgz#578082d92d4fe612b13007496e543fa0fbcbe4c5" + integrity sha512-/9HIEs1ZXGhSPE8X6Ccm7Nam1z8KcoCqPdI7ecm1N33EzAetWahvQWVqLZtaZQ+IDKX4IyA2o0gBzqIMkAagHQ== + dependencies: + asn1 "~0.2.3" + assert-plus "^1.0.0" + bcrypt-pbkdf "^1.0.0" + dashdash "^1.12.0" + ecc-jsbn "~0.1.1" + getpass "^0.1.1" + jsbn "~0.1.0" + safer-buffer "^2.0.2" + tweetnacl "~0.14.0" + +stable@~0.1.8: + version "0.1.8" + resolved "https://registry.yarnpkg.com/stable/-/stable-0.1.8.tgz#836eb3c8382fe2936feaf544631017ce7d47a3cf" + integrity sha512-ji9qxRnOVfcuLDySj9qzhGSEFVobyt1kIOSkj1qZzYLzq7Tos/oUUWvotUPQLlrsidqsK6tBH89Bc9kL5zHA6w== + +statuses@2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/statuses/-/statuses-2.0.1.tgz#55cb000ccf1d48728bd23c685a063998cf1a1b63" + integrity sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ== + +"statuses@>= 1.5.0 < 2": + version "1.5.0" + resolved "https://registry.yarnpkg.com/statuses/-/statuses-1.5.0.tgz#161c7dac177659fd9811f43771fa99381478628c" + integrity sha1-Fhx9rBd2Wf2YEfQ3cfqZOBR4Yow= + +stoppable@^1.1.0: + version "1.1.0" + resolved 
"https://registry.yarnpkg.com/stoppable/-/stoppable-1.1.0.tgz#32da568e83ea488b08e4d7ea2c3bcc9d75015d5b" + integrity sha512-KXDYZ9dszj6bzvnEMRYvxgeTHU74QBFL54XKtP3nyMuJ81CFYtABZ3bAzL2EdFUaEwJOBOgENyFj3R7oTzDyyw== + +stream-shift@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/stream-shift/-/stream-shift-1.0.1.tgz#d7088281559ab2778424279b0877da3c392d5a3d" + integrity sha512-AiisoFqQ0vbGcZgQPY1cdP2I76glaVA/RauYR4G4thNFgkTqr90yXTo4LYX60Jl+sIlPNHHdGSwo01AvbKUSVQ== + +stream-to-pull-stream@^1.7.2: + version "1.7.3" + resolved "https://registry.yarnpkg.com/stream-to-pull-stream/-/stream-to-pull-stream-1.7.3.tgz#4161aa2d2eb9964de60bfa1af7feaf917e874ece" + integrity sha512-6sNyqJpr5dIOQdgNy/xcDWwDuzAsAwVzhzrWlAPAQ7Lkjx/rv0wgvxEyKwTq6FmNd5rjTrELt/CLmaSw7crMGg== + dependencies: + looper "^3.0.0" + pull-stream "^3.2.3" + +streamsearch@0.1.2: + version "0.1.2" + resolved "https://registry.yarnpkg.com/streamsearch/-/streamsearch-0.1.2.tgz#808b9d0e56fc273d809ba57338e929919a1a9f1a" + integrity sha1-gIudDlb8Jz2Am6VzOOkpkZoanxo= + +strict-uri-encode@^1.0.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/strict-uri-encode/-/strict-uri-encode-1.1.0.tgz#279b225df1d582b1f54e65addd4352e18faa0713" + integrity sha1-J5siXfHVgrH1TmWt3UNS4Y+qBxM= + +string-width@^1.0.1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/string-width/-/string-width-1.0.2.tgz#118bdf5b8cdc51a2a7e70d211e07e2b0b9b107d3" + integrity sha1-EYvfW4zcUaKn5w0hHgfisLmxB9M= + dependencies: + code-point-at "^1.0.0" + is-fullwidth-code-point "^1.0.0" + strip-ansi "^3.0.0" + +"string-width@^1.0.2 || 2", string-width@^2.1.1: + version "2.1.1" + resolved "https://registry.yarnpkg.com/string-width/-/string-width-2.1.1.tgz#ab93f27a8dc13d28cac815c462143a6d9012ae9e" + integrity sha512-nOqH59deCq9SRHlxq1Aw85Jnt4w6KvLKqWVik6oA9ZklXLNIOlqg4F2yrT1MVaTjAqvVwdfeZ7w7aCvJD7ugkw== + dependencies: + is-fullwidth-code-point "^2.0.0" + strip-ansi "^4.0.0" + +string-width@^3.0.0, string-width@^3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/string-width/-/string-width-3.1.0.tgz#22767be21b62af1081574306f69ac51b62203961" + integrity sha512-vafcv6KjVZKSgz06oM/H6GDBrAtz8vdhQakGjFIvNrHA6y3HCF1CInLy+QLq8dTJPQ1b+KDUqDFctkdRW44e1w== + dependencies: + emoji-regex "^7.0.1" + is-fullwidth-code-point "^2.0.0" + strip-ansi "^5.1.0" + +string-width@^4.1.0, string-width@^4.2.0: + version "4.2.2" + resolved "https://registry.yarnpkg.com/string-width/-/string-width-4.2.2.tgz#dafd4f9559a7585cfba529c6a0a4f73488ebd4c5" + integrity sha512-XBJbT3N4JhVumXE0eoLU9DCjcaF92KLNqTmFCnG1pf8duUxFGwtP6AD6nkjw9a3IdiRtL3E2w3JDiE/xi3vOeA== + dependencies: + emoji-regex "^8.0.0" + is-fullwidth-code-point "^3.0.0" + strip-ansi "^6.0.0" + +string.prototype.trimend@^1.0.4: + version "1.0.4" + resolved "https://registry.yarnpkg.com/string.prototype.trimend/-/string.prototype.trimend-1.0.4.tgz#e75ae90c2942c63504686c18b287b4a0b1a45f80" + integrity sha512-y9xCjw1P23Awk8EvTpcyL2NIr1j7wJ39f+k6lvRnSMz+mz9CGz9NYPelDk42kOz6+ql8xjfK8oYzy3jAP5QU5A== + dependencies: + call-bind "^1.0.2" + define-properties "^1.1.3" + +string.prototype.trimstart@^1.0.4: + version "1.0.4" + resolved "https://registry.yarnpkg.com/string.prototype.trimstart/-/string.prototype.trimstart-1.0.4.tgz#b36399af4ab2999b4c9c648bd7a3fb2bb26feeed" + integrity sha512-jh6e984OBfvxS50tdY2nRZnoC5/mLFKOREQfw8t5yytkoUsJRNxvI/E39qu1sD0OtWI3OC0XgKSmcWwziwYuZw== + dependencies: + call-bind "^1.0.2" + define-properties "^1.1.3" + +string_decoder@^1.1.1, string_decoder@^1.2.0: + version "1.3.0" + resolved 
"https://registry.yarnpkg.com/string_decoder/-/string_decoder-1.3.0.tgz#42f114594a46cf1a8e30b0a84f56c78c3edac21e" + integrity sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA== + dependencies: + safe-buffer "~5.2.0" + +string_decoder@~0.10.x: + version "0.10.31" + resolved "https://registry.yarnpkg.com/string_decoder/-/string_decoder-0.10.31.tgz#62e203bc41766c6c28c9fc84301dab1c5310fa94" + integrity sha512-ev2QzSzWPYmy9GuqfIVildA4OdcGLeFZQrq5ys6RtiuF+RQQiZWr8TZNyAcuVXyQRYfEO+MsoB/1BuQVhOJuoQ== + +string_decoder@~1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/string_decoder/-/string_decoder-1.1.1.tgz#9cf1611ba62685d7030ae9e4ba34149c3af03fc8" + integrity sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg== + dependencies: + safe-buffer "~5.1.0" + +strip-ansi@^3.0.0, strip-ansi@^3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-3.0.1.tgz#6a385fb8853d952d5ff05d0e8aaf94278dc63dcf" + integrity sha1-ajhfuIU9lS1f8F0Oiq+UJ43GPc8= + dependencies: + ansi-regex "^2.0.0" + +strip-ansi@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-4.0.0.tgz#a8479022eb1ac368a871389b635262c505ee368f" + integrity sha512-4XaJ2zQdCzROZDivEVIDPkcQn8LMFSa8kj8Gxb/Lnwzv9A8VctNZ+lfivC/sV3ivW8ElJTERXZoPBRrZKkNKow== + dependencies: + ansi-regex "^3.0.0" + +strip-ansi@^5.0.0, strip-ansi@^5.1.0, strip-ansi@^5.2.0: + version "5.2.0" + resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-5.2.0.tgz#8c9a536feb6afc962bdfa5b104a5091c1ad9c0ae" + integrity sha512-DuRs1gKbBqsMKIZlrffwlug8MHkcnpjs5VPmL1PAh+mA30U0DTotfDZ0d2UUsXpPmPmMMJ6W773MaA3J+lbiWA== + dependencies: + ansi-regex "^4.1.0" + +strip-ansi@^6.0.0: + version "6.0.1" + resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-6.0.1.tgz#9e26c63d30f53443e9489495b2105d37b67a85d9" + integrity sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A== + dependencies: + ansi-regex "^5.0.1" + +strip-bom-stream@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/strip-bom-stream/-/strip-bom-stream-1.0.0.tgz#e7144398577d51a6bed0fa1994fa05f43fd988ee" + integrity sha1-5xRDmFd9Uaa+0PoZlPoF9D/ZiO4= + dependencies: + first-chunk-stream "^1.0.0" + strip-bom "^2.0.0" + +strip-bom@2.X, strip-bom@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/strip-bom/-/strip-bom-2.0.0.tgz#6219a85616520491f35788bdbf1447a99c7e6b0e" + integrity sha1-YhmoVhZSBJHzV4i9vxRHqZx+aw4= + dependencies: + is-utf8 "^0.2.0" + +strip-final-newline@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/strip-final-newline/-/strip-final-newline-2.0.0.tgz#89b852fb2fcbe936f6f4b3187afb0a12c1ab58ad" + integrity sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA== + +strip-hex-prefix@1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/strip-hex-prefix/-/strip-hex-prefix-1.0.0.tgz#0c5f155fef1151373377de9dbb588da05500e36f" + integrity sha512-q8d4ue7JGEiVcypji1bALTos+0pWtyGlivAWyPuTkHzuTCJqrK9sWxYQZUq6Nq3cuyv3bm734IhHvHtGGURU6A== + dependencies: + is-hex-prefixed "1.0.0" + +strip-indent@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/strip-indent/-/strip-indent-2.0.0.tgz#5ef8db295d01e6ed6cbf7aab96998d7822527b68" + integrity sha1-XvjbKV0B5u1sv3qrlpmNeCJSe2g= + +strip-json-comments@3.0.1: + version "3.0.1" + resolved 
"https://registry.yarnpkg.com/strip-json-comments/-/strip-json-comments-3.0.1.tgz#85713975a91fb87bf1b305cca77395e40d2a64a7" + integrity sha512-VTyMAUfdm047mwKl+u79WIdrZxtFtn+nBxHeb844XBQ9uMNTuTHdx2hc5RiAJYqwTj3wc/xe5HLSdJSkJ+WfZw== + +strip-json-comments@~2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/strip-json-comments/-/strip-json-comments-2.0.1.tgz#3c531942e908c2697c0ec344858c286c7ca0a60a" + integrity sha1-PFMZQukIwml8DsNEhYwobHygpgo= + +sublevel-pouchdb@7.2.2: + version "7.2.2" + resolved "https://registry.yarnpkg.com/sublevel-pouchdb/-/sublevel-pouchdb-7.2.2.tgz#49e46cd37883bf7ff5006d7c5b9bcc7bcc1f422f" + integrity sha512-y5uYgwKDgXVyPZceTDGWsSFAhpSddY29l9PJbXqMJLfREdPmQTY8InpatohlEfCXX7s1LGcrfYAhxPFZaJOLnQ== + dependencies: + inherits "2.0.4" + level-codec "9.0.2" + ltgt "2.2.1" + readable-stream "1.1.14" + +subscriptions-transport-ws@^0.9.19: + version "0.9.19" + resolved "https://registry.yarnpkg.com/subscriptions-transport-ws/-/subscriptions-transport-ws-0.9.19.tgz#10ca32f7e291d5ee8eb728b9c02e43c52606cdcf" + integrity sha512-dxdemxFFB0ppCLg10FTtRqH/31FNRL1y1BQv8209MK5I4CwALb7iihQg+7p65lFcIl8MHatINWBLOqpgU4Kyyw== + dependencies: + backo2 "^1.0.2" + eventemitter3 "^3.1.0" + iterall "^1.2.1" + symbol-observable "^1.0.4" + ws "^5.2.0 || ^6.0.0 || ^7.0.0" + +super-split@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/super-split/-/super-split-1.1.0.tgz#43b3ba719155f4d43891a32729d59b213d9155fc" + integrity sha512-I4bA5mgcb6Fw5UJ+EkpzqXfiuvVGS/7MuND+oBxNFmxu3ugLNrdIatzBLfhFRMVMLxgSsRy+TjIktgkF9RFSNQ== + +supports-color@7.1.0: + version "7.1.0" + resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-7.1.0.tgz#68e32591df73e25ad1c4b49108a2ec507962bfd1" + integrity sha512-oRSIpR8pxT1Wr2FquTNnGet79b3BWljqOuoW/h4oBhxJ/HUbX5nX6JSruTkvXDCFMwDPvsaTTbvMLKZWSy0R5g== + dependencies: + has-flag "^4.0.0" + +supports-color@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-2.0.0.tgz#535d045ce6b6363fa40117084629995e9df324c7" + integrity sha1-U10EXOa2Nj+kARcIRimZXp3zJMc= + +supports-color@^5.3.0: + version "5.5.0" + resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-5.5.0.tgz#e2e69a44ac8772f78a1ec0b35b689df6530efc8f" + integrity sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow== + dependencies: + has-flag "^3.0.0" + +supports-color@^7.1.0: + version "7.2.0" + resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-7.2.0.tgz#1b7dcdcb32b8138801b3e478ba6a51caa89648da" + integrity sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw== + dependencies: + has-flag "^4.0.0" + +swap-case@^1.1.0: + version "1.1.2" + resolved "https://registry.yarnpkg.com/swap-case/-/swap-case-1.1.2.tgz#c39203a4587385fad3c850a0bd1bcafa081974e3" + integrity sha1-w5IDpFhzhfrTyFCgvRvK+ggZdOM= + dependencies: + lower-case "^1.1.1" + upper-case "^1.1.1" + +swarm-js@^0.1.40: + version "0.1.40" + resolved "https://registry.yarnpkg.com/swarm-js/-/swarm-js-0.1.40.tgz#b1bc7b6dcc76061f6c772203e004c11997e06b99" + integrity sha512-yqiOCEoA4/IShXkY3WKwP5PvZhmoOOD8clsKA7EEcRILMkTEYHCQ21HDCAcVpmIxZq4LyZvWeRJ6quIyHk1caA== + dependencies: + bluebird "^3.5.0" + buffer "^5.0.5" + eth-lib "^0.1.26" + fs-extra "^4.0.2" + got "^7.1.0" + mime-types "^2.1.16" + mkdirp-promise "^5.0.1" + mock-fs "^4.1.0" + setimmediate "^1.0.5" + tar "^4.0.2" + xhr-request "^1.0.1" + +symbol-observable@^1.0.3, symbol-observable@^1.0.4, 
symbol-observable@^1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/symbol-observable/-/symbol-observable-1.2.0.tgz#c22688aed4eab3cdc2dfeacbb561660560a00804" + integrity sha512-e900nM8RRtGhlV36KGEU9k65K3mPb1WV70OdjfxlG2EAuM1noi/E/BaW/uMhL7bPEssK8QV57vN3esixjUvcXQ== + +symbol-observable@^2.0.0: + version "2.0.3" + resolved "https://registry.yarnpkg.com/symbol-observable/-/symbol-observable-2.0.3.tgz#5b521d3d07a43c351055fa43b8355b62d33fd16a" + integrity sha512-sQV7phh2WCYAn81oAkakC5qjq2Ml0g8ozqz03wOGnx9dDlG1de6yrF+0RAzSJD8fPUow3PTSMf2SAbOGxb93BA== + +"symbol-tree@>= 3.1.0 < 4.0.0": + version "3.2.4" + resolved "https://registry.yarnpkg.com/symbol-tree/-/symbol-tree-3.2.4.tgz#430637d248ba77e078883951fb9aa0eed7c63fa2" + integrity sha512-9QNk5KwDF+Bvz+PyObkmSYjI5ksVUYtjW7AU22r2NKcfLJcXp96hkDWU3+XndOsUb+AQ9QhfzfCT2O+CNWT5Tw== + +symbol@^0.2.1: + version "0.2.3" + resolved "https://registry.yarnpkg.com/symbol/-/symbol-0.2.3.tgz#3b9873b8a901e47c6efe21526a3ac372ef28bbc7" + integrity sha1-O5hzuKkB5Hxu/iFSajrDcu8ou8c= + +sync-fetch@0.3.0: + version "0.3.0" + resolved "https://registry.yarnpkg.com/sync-fetch/-/sync-fetch-0.3.0.tgz#77246da949389310ad978ab26790bb05f88d1335" + integrity sha512-dJp4qg+x4JwSEW1HibAuMi0IIrBI3wuQr2GimmqB7OXR50wmwzfdusG+p39R9w3R6aFtZ2mzvxvWKQ3Bd/vx3g== + dependencies: + buffer "^5.7.0" + node-fetch "^2.6.1" + +sync-request@6.1.0: + version "6.1.0" + resolved "https://registry.yarnpkg.com/sync-request/-/sync-request-6.1.0.tgz#e96217565b5e50bbffe179868ba75532fb597e68" + integrity sha512-8fjNkrNlNCrVc/av+Jn+xxqfCjYaBoHqCsDz6mt030UMxJGr+GSfCV1dQt2gRtlL63+VPidwDVLr7V2OcTSdRw== + dependencies: + http-response-object "^3.0.1" + sync-rpc "^1.2.1" + then-request "^6.0.0" + +sync-rpc@^1.2.1: + version "1.3.6" + resolved "https://registry.yarnpkg.com/sync-rpc/-/sync-rpc-1.3.6.tgz#b2e8b2550a12ccbc71df8644810529deb68665a7" + integrity sha512-J8jTXuZzRlvU7HemDgHi3pGnh/rkoqR/OZSjhTyyZrEkkYQbk7Z33AXp37mkPfPpfdOuj7Ex3H/TJM1z48uPQw== + dependencies: + get-port "^3.1.0" + +taffydb@2.7.3: + version "2.7.3" + resolved "https://registry.yarnpkg.com/taffydb/-/taffydb-2.7.3.tgz#2ad37169629498fca5bc84243096d3cde0ec3a34" + integrity sha1-KtNxaWKUmPylvIQkMJbTzeDsOjQ= + +tar-fs@~1.16.3: + version "1.16.3" + resolved "https://registry.yarnpkg.com/tar-fs/-/tar-fs-1.16.3.tgz#966a628841da2c4010406a82167cbd5e0c72d509" + integrity sha512-NvCeXpYx7OsmOh8zIOP/ebG55zZmxLE0etfWRbWok+q2Qo8x/vOR/IJT1taADXPe+jsiu9axDb3X4B+iIgNlKw== + dependencies: + chownr "^1.0.1" + mkdirp "^0.5.1" + pump "^1.0.0" + tar-stream "^1.1.2" + +tar-stream@^1.1.2: + version "1.6.2" + resolved "https://registry.yarnpkg.com/tar-stream/-/tar-stream-1.6.2.tgz#8ea55dab37972253d9a9af90fdcd559ae435c555" + integrity sha512-rzS0heiNf8Xn7/mpdSVVSMAWAoy9bfb1WOTYC78Z0UQKeKa/CWS8FOq0lKGNa8DWKAn9gxjCvMLYc5PGXYlK2A== + dependencies: + bl "^1.0.0" + buffer-alloc "^1.2.0" + end-of-stream "^1.0.0" + fs-constants "^1.0.0" + readable-stream "^2.3.0" + to-buffer "^1.1.1" + xtend "^4.0.0" + +tar-stream@^2.0.1: + version "2.2.0" + resolved "https://registry.yarnpkg.com/tar-stream/-/tar-stream-2.2.0.tgz#acad84c284136b060dc3faa64474aa9aebd77287" + integrity sha512-ujeqbceABgwMZxEJnk2HDY2DlnUZ+9oEcb1KzTVfYHio0UE6dG71n60d8D2I4qNvleWrrXpmjpt7vZeF1LnMZQ== + dependencies: + bl "^4.0.3" + end-of-stream "^1.4.1" + fs-constants "^1.0.0" + inherits "^2.0.3" + readable-stream "^3.1.1" + +tar@^4, tar@^4.0.2: + version "4.4.19" + resolved "https://registry.yarnpkg.com/tar/-/tar-4.4.19.tgz#2e4d7263df26f2b914dee10c825ab132123742f3" + integrity 
sha512-a20gEsvHnWe0ygBY8JbxoM4w3SJdhc7ZAuxkLqh+nvNQN2IOt0B5lLgM490X5Hl8FF0dl0tOf2ewFYAlIFgzVA== + dependencies: + chownr "^1.1.4" + fs-minipass "^1.2.7" + minipass "^2.9.0" + minizlib "^1.3.3" + mkdirp "^0.5.5" + safe-buffer "^5.2.1" + yallist "^3.1.1" + +tar@^6.1.0: + version "6.1.11" + resolved "https://registry.yarnpkg.com/tar/-/tar-6.1.11.tgz#6760a38f003afa1b2ffd0ffe9e9abbd0eab3d621" + integrity sha512-an/KZQzQUkZCkuoAA64hM92X0Urb6VpRhAFllDzz44U2mcD5scmT3zBc4VgVpkugF580+DQn8eAFSyoQt0tznA== + dependencies: + chownr "^2.0.0" + fs-minipass "^2.0.0" + minipass "^3.0.0" + minizlib "^2.1.1" + mkdirp "^1.0.3" + yallist "^4.0.0" + +then-request@^6.0.0: + version "6.0.2" + resolved "https://registry.yarnpkg.com/then-request/-/then-request-6.0.2.tgz#ec18dd8b5ca43aaee5cb92f7e4c1630e950d4f0c" + integrity sha512-3ZBiG7JvP3wbDzA9iNY5zJQcHL4jn/0BWtXIkagfz7QgOL/LqjCEOBQuJNZfu0XYnv5JhKh+cDxCPM4ILrqruA== + dependencies: + "@types/concat-stream" "^1.6.0" + "@types/form-data" "0.0.33" + "@types/node" "^8.0.0" + "@types/qs" "^6.2.31" + caseless "~0.12.0" + concat-stream "^1.6.0" + form-data "^2.2.0" + http-basic "^8.1.1" + http-response-object "^3.0.1" + promise "^8.0.0" + qs "^6.4.0" + +through2-filter@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/through2-filter/-/through2-filter-2.0.0.tgz#60bc55a0dacb76085db1f9dae99ab43f83d622ec" + integrity sha1-YLxVoNrLdghdsfna6Zq0P4PWIuw= + dependencies: + through2 "~2.0.0" + xtend "~4.0.0" + +through2-filter@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/through2-filter/-/through2-filter-3.0.0.tgz#700e786df2367c2c88cd8aa5be4cf9c1e7831254" + integrity sha512-jaRjI2WxN3W1V8/FMZ9HKIBXixtiqs3SQSX4/YGIiP3gL6djW48VoZq9tDqeCWs3MT8YY5wb/zli8VW8snY1CA== + dependencies: + through2 "~2.0.0" + xtend "~4.0.0" + +through2@2.X, through2@^2.0.0, through2@^2.0.3, through2@~2.0.0: + version "2.0.5" + resolved "https://registry.yarnpkg.com/through2/-/through2-2.0.5.tgz#01c1e39eb31d07cb7d03a96a70823260b23132cd" + integrity sha512-/mrRod8xqpA+IHSLyGCQ2s8SPHiCDEeQJSep1jqLYeEUClOFG2Qsh+4FU6G9VeqpZnGW/Su8LQGc4YKni5rYSQ== + dependencies: + readable-stream "~2.3.6" + xtend "~4.0.1" + +through2@3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/through2/-/through2-3.0.1.tgz#39276e713c3302edf9e388dd9c812dd3b825bd5a" + integrity sha512-M96dvTalPT3YbYLaKaCuwu+j06D/8Jfib0o/PxbVt6Amhv3dUAtW6rTV1jPgJSBG83I/e04Y6xkVdVhSRhi0ww== + dependencies: + readable-stream "2 || 3" + +through2@3.0.2, through2@^3.0.0, through2@^3.0.1: + version "3.0.2" + resolved "https://registry.yarnpkg.com/through2/-/through2-3.0.2.tgz#99f88931cfc761ec7678b41d5d7336b5b6a07bf4" + integrity sha512-enaDQ4MUyP2W6ZyT6EsMzqBPZaM/avg8iuo+l2d3QCs0J+6RaqkHV/2/lOwDTueBHeJ/2LG9lrLW3d5rWPucuQ== + dependencies: + inherits "^2.0.4" + readable-stream "2 || 3" + +through2@^0.6.0: + version "0.6.5" + resolved "https://registry.yarnpkg.com/through2/-/through2-0.6.5.tgz#41ab9c67b29d57209071410e1d7a7a968cd3ad48" + integrity sha1-QaucZ7KdVyCQcUEOHXp6lozTrUg= + dependencies: + readable-stream ">=1.0.33-1 <1.1.0-0" + xtend ">=4.0.0 <4.1.0-0" + +"through@>=2.2.7 <3": + version "2.3.8" + resolved "https://registry.yarnpkg.com/through/-/through-2.3.8.tgz#0dd4c9ffaabc357960b1b724115d7e0e86a2e1f5" + integrity sha512-w89qg7PI8wAdvX60bMDP+bFoD5Dvhm9oLheFp5O4a2QF0cSBGsBX4qZmadPMvVqlLJBBci+WqGGOAPvcDeNSVg== + +tildify@1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/tildify/-/tildify-1.2.0.tgz#dcec03f55dca9b7aa3e5b04f21817eb56e63588a" + integrity sha1-3OwD9V3Km3qj5bBPIYF+tW5jWIo= + 
dependencies: + os-homedir "^1.0.0" + +timed-out@^4.0.0, timed-out@^4.0.1: + version "4.0.1" + resolved "https://registry.yarnpkg.com/timed-out/-/timed-out-4.0.1.tgz#f32eacac5a175bea25d7fab565ab3ed8741ef56f" + integrity sha1-8y6srFoXW+ol1/q1Zas+2HQe9W8= + +tiny-queue@^0.2.1: + version "0.2.1" + resolved "https://registry.yarnpkg.com/tiny-queue/-/tiny-queue-0.2.1.tgz#25a67f2c6e253b2ca941977b5ef7442ef97a6046" + integrity sha1-JaZ/LG4lOyypQZd7XvdELvl6YEY= + +title-case@^2.1.0: + version "2.1.1" + resolved "https://registry.yarnpkg.com/title-case/-/title-case-2.1.1.tgz#3e127216da58d2bc5becf137ab91dae3a7cd8faa" + integrity sha1-PhJyFtpY0rxb7PE3q5Ha46fNj6o= + dependencies: + no-case "^2.2.0" + upper-case "^1.0.3" + +tmp-promise@3.0.2: + version "3.0.2" + resolved "https://registry.yarnpkg.com/tmp-promise/-/tmp-promise-3.0.2.tgz#6e933782abff8b00c3119d63589ca1fb9caaa62a" + integrity sha512-OyCLAKU1HzBjL6Ev3gxUeraJNlbNingmi8IrHHEsYH8LTmEuhvYfqvhn2F/je+mjf4N58UmZ96OMEy1JanSCpA== + dependencies: + tmp "^0.2.0" + +tmp@0.0.33: + version "0.0.33" + resolved "https://registry.yarnpkg.com/tmp/-/tmp-0.0.33.tgz#6d34335889768d21b2bcda0aa277ced3b1bfadf9" + integrity sha512-jRCJlojKnZ3addtTOjdIqoRuPEKBvNXcGYqzO6zWZX8KfKEpnGY5jfggJQ3EjKuu8D4bJRr0y+cYJFmYbImXGw== + dependencies: + os-tmpdir "~1.0.2" + +tmp@^0.2.0: + version "0.2.1" + resolved "https://registry.yarnpkg.com/tmp/-/tmp-0.2.1.tgz#8457fc3037dcf4719c251367a1af6500ee1ccf14" + integrity sha512-76SUhtfqR2Ijn+xllcI5P1oyannHNHByD80W1q447gU3mp9G9PSpGdWmjUOHRDPiHYacIk66W7ubDTuPF3BEtQ== + dependencies: + rimraf "^3.0.0" + +to-absolute-glob@^0.1.1: + version "0.1.1" + resolved "https://registry.yarnpkg.com/to-absolute-glob/-/to-absolute-glob-0.1.1.tgz#1cdfa472a9ef50c239ee66999b662ca0eb39937f" + integrity sha1-HN+kcqnvUMI57maZm2YsoOs5k38= + dependencies: + extend-shallow "^2.0.1" + +to-buffer@^1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/to-buffer/-/to-buffer-1.1.1.tgz#493bd48f62d7c43fcded313a03dcadb2e1213a80" + integrity sha512-lx9B5iv7msuFYE3dytT+KE5tap+rNYw+K4jVkb9R/asAb+pbBSM17jtunHplhBe6RRJdZx3Pn2Jph24O32mOVg== + +to-fast-properties@^1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/to-fast-properties/-/to-fast-properties-1.0.3.tgz#b83571fa4d8c25b82e231b06e3a3055de4ca1a47" + integrity sha1-uDVx+k2MJbguIxsG46MFXeTKGkc= + +to-fast-properties@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/to-fast-properties/-/to-fast-properties-2.0.0.tgz#dc5e698cbd079265bc73e0377681a4e4e83f616e" + integrity sha1-3F5pjL0HkmW8c+A3doGk5Og/YW4= + +to-readable-stream@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/to-readable-stream/-/to-readable-stream-1.0.0.tgz#ce0aa0c2f3df6adf852efb404a783e77c0475771" + integrity sha512-Iq25XBt6zD5npPhlLVXGFN3/gyR2/qODcKNNyTMd4vbm39HUaOiAM4PMq0eMVC/Tkxz+Zjdsc55g9yyz+Yq00Q== + +to-regex-range@^5.0.1: + version "5.0.1" + resolved "https://registry.yarnpkg.com/to-regex-range/-/to-regex-range-5.0.1.tgz#1648c44aae7c8d988a326018ed72f5b4dd0392e4" + integrity sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ== + dependencies: + is-number "^7.0.0" + +toidentifier@1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/toidentifier/-/toidentifier-1.0.0.tgz#7e1be3470f1e77948bc43d94a3c8f4d7752ba553" + integrity sha512-yaOH/Pk/VEhBWWTlhI+qXxDFXlejDGcQipMlyxda9nthulaxLZUNcUqFxokp0vcYnvteJln5FNQDRrxj3YcbVw== + +toidentifier@1.0.1: + version "1.0.1" + resolved 
"https://registry.yarnpkg.com/toidentifier/-/toidentifier-1.0.1.tgz#3be34321a88a820ed1bd80dfaa33e479fbb8dd35" + integrity sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA== + +tough-cookie@^2.2.0, tough-cookie@^2.3.1, tough-cookie@~2.5.0: + version "2.5.0" + resolved "https://registry.yarnpkg.com/tough-cookie/-/tough-cookie-2.5.0.tgz#cd9fb2a0aa1d5a12b473bd9fb96fa3dcff65ade2" + integrity sha512-nlLsUzgm1kfLXSXfRZMc1KLAugd4hqJHDTvc2hDIwS3mZAfMEuMbc03SujMF+GEcpaX/qboeycw6iO8JwVv2+g== + dependencies: + psl "^1.1.28" + punycode "^2.1.1" + +"tough-cookie@^2.3.3 || ^3.0.1 || ^4.0.0": + version "4.0.0" + resolved "https://registry.yarnpkg.com/tough-cookie/-/tough-cookie-4.0.0.tgz#d822234eeca882f991f0f908824ad2622ddbece4" + integrity sha512-tHdtEpQCMrc1YLrMaqXXcj6AxhYi/xgit6mZu1+EDWUn+qhUf8wMQoFIy9NXuq23zAwtcB0t/MjACGR18pcRbg== + dependencies: + psl "^1.1.33" + punycode "^2.1.1" + universalify "^0.1.2" + +tr46@~0.0.1, tr46@~0.0.3: + version "0.0.3" + resolved "https://registry.yarnpkg.com/tr46/-/tr46-0.0.3.tgz#8184fd347dac9cdc185992f3a6622e14b9d9ab6a" + integrity sha1-gYT9NH2snNwYWZLzpmIuFLnZq2o= + +trim-right@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/trim-right/-/trim-right-1.0.1.tgz#cb2e1203067e0c8de1f614094b9fe45704ea6003" + integrity sha1-yy4SAwZ+DI3h9hQJS5/kVwTqYAM= + +truffle@^5.2: + version "5.2.3" + resolved "https://registry.yarnpkg.com/truffle/-/truffle-5.2.3.tgz#6c1585da56b704397017833ea6b62e18303b924f" + integrity sha512-iOeOSaCZtQ+TWsEh6yc6Al+RVkWTsJQnceXNYSCYR86QcXssGY5CqDQ2JwIxwAN4YMRf4GZ/LRAPul6qX36b6A== + dependencies: + "@truffle/debugger" "^8.0.17" + app-module-path "^2.2.0" + mocha "8.1.2" + original-require "^1.0.1" + optionalDependencies: + "@truffle/db" "^0.5.3" + +ts-invariant@^0.4.0: + version "0.4.4" + resolved "https://registry.yarnpkg.com/ts-invariant/-/ts-invariant-0.4.4.tgz#97a523518688f93aafad01b0e80eb803eb2abd86" + integrity sha512-uEtWkFM/sdZvRNNDL3Ehu4WVpwaulhwQszV8mrtcdeE8nN00BV9mAmQ88RkrBhFgl9gMgvjJLAQcZbnPXI9mlA== + dependencies: + tslib "^1.9.3" + +ts-invariant@^0.6.0: + version "0.6.1" + resolved "https://registry.yarnpkg.com/ts-invariant/-/ts-invariant-0.6.1.tgz#eb4c52b45daaca8367abbfd6cff998ea871d592d" + integrity sha512-QQgN33g8E8yrdDuH29HASveLtbzMnRRgWh0i/JNTW4+zcLsdIOnfsgEDi/NKx4UckQyuMFt9Ujm6TWLWQ58Kvg== + dependencies: + "@types/ungap__global-this" "^0.3.1" + "@ungap/global-this" "^0.4.2" + tslib "^1.9.3" + +tslib@^1.10.0, tslib@^1.14.1, tslib@^1.9.3: + version "1.14.1" + resolved "https://registry.yarnpkg.com/tslib/-/tslib-1.14.1.tgz#cf2d38bdc34a134bcaf1091c41f6619e2f672d00" + integrity sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg== + +tslib@^2.0.3, tslib@~2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/tslib/-/tslib-2.1.0.tgz#da60860f1c2ecaa5703ab7d39bc05b6bf988b97a" + integrity sha512-hcVC3wYEziELGGmEEXue7D75zbwIIVUMWAVbHItGPx0ziyXxrOMQx4rQEVEV45Ut/1IotuEvwqPopzIOkDMf0A== + +tslib@^2.1.0: + version "2.3.1" + resolved "https://registry.yarnpkg.com/tslib/-/tslib-2.3.1.tgz#e8a335add5ceae51aa261d32a490158ef042ef01" + integrity sha512-77EbyPPpMz+FRFRuAFlWMtmgUWGe9UOG2Z25NqCwiIjRhOf5iKGuzSe5P2w1laq+FkRy4p+PCuVkJSGkzTEKVw== + +tslib@~2.0.1: + version "2.0.3" + resolved "https://registry.yarnpkg.com/tslib/-/tslib-2.0.3.tgz#8e0741ac45fc0c226e58a17bfc3e64b9bc6ca61c" + integrity sha512-uZtkfKblCEQtZKBF6EBXVZeQNl82yqtDQdv+eck8u7tdPxjLu2/lp5/uPW+um2tpuxINHWy3GhiccY7QgEaVHQ== + +tunnel-agent@^0.6.0: + version "0.6.0" + resolved 
"https://registry.yarnpkg.com/tunnel-agent/-/tunnel-agent-0.6.0.tgz#27a5dea06b36b04a0a9966774b290868f0fc40fd" + integrity sha512-McnNiV1l8RYeY8tBgEpuodCC1mLUdbSN+CYBL7kJsJNInOP8UjDDEwdk6Mw60vdLLrr5NHKZhMAOSrR2NZuQ+w== + dependencies: + safe-buffer "^5.0.1" + +tweetnacl@^0.14.3, tweetnacl@~0.14.0: + version "0.14.5" + resolved "https://registry.yarnpkg.com/tweetnacl/-/tweetnacl-0.14.5.tgz#5ae68177f192d4456269d108afa93ff8743f4f64" + integrity sha512-KXXFFdAbFXY4geFIwoyNK+f5Z1b7swfXABfL7HXCmoIWMKU3dmS26672A4EeQtDzLKy7SXmfBu51JolvEKwtGA== + +tweetnacl@^1.0.0: + version "1.0.3" + resolved "https://registry.yarnpkg.com/tweetnacl/-/tweetnacl-1.0.3.tgz#ac0af71680458d8a6378d0d0d050ab1407d35596" + integrity sha512-6rt+RN7aOi1nGMyC4Xa5DdYiukl2UWCbcJft7YhxReBGQD7OAM8Pbxw6YMo4r2diNEA8FEmu32YOn9rhaiE5yw== + +type-check@~0.3.2: + version "0.3.2" + resolved "https://registry.yarnpkg.com/type-check/-/type-check-0.3.2.tgz#5884cab512cf1d355e3fb784f30804b2b520db72" + integrity sha1-WITKtRLPHTVeP7eE8wgEsrUg23I= + dependencies: + prelude-ls "~1.1.2" + +type-is@^1.6.16, type-is@~1.6.18: + version "1.6.18" + resolved "https://registry.yarnpkg.com/type-is/-/type-is-1.6.18.tgz#4e552cd05df09467dcbc4ef739de89f2cf37c131" + integrity sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g== + dependencies: + media-typer "0.3.0" + mime-types "~2.1.24" + +type@^1.0.1: + version "1.2.0" + resolved "https://registry.yarnpkg.com/type/-/type-1.2.0.tgz#848dd7698dafa3e54a6c479e759c4bc3f18847a0" + integrity sha512-+5nt5AAniqsCnu2cEQQdpzCAh33kVx8n0VoFidKpB1dVVLAN/F+bgVOqOJqOnEnrhp222clB5p3vUlD+1QAnfg== + +type@^2.0.0: + version "2.5.0" + resolved "https://registry.yarnpkg.com/type/-/type-2.5.0.tgz#0a2e78c2e77907b252abe5f298c1b01c63f0db3d" + integrity sha512-180WMDQaIMm3+7hGXWf12GtdniDEy7nYcyFMKJn/eZz/6tSLXrUN9V0wKSbMjej0I1WHWbpREDEKHtqPQa9NNw== + +typedarray-to-buffer@^3.1.5, typedarray-to-buffer@~3.1.5: + version "3.1.5" + resolved "https://registry.yarnpkg.com/typedarray-to-buffer/-/typedarray-to-buffer-3.1.5.tgz#a97ee7a9ff42691b9f783ff1bc5112fe3fca9080" + integrity sha512-zdu8XMNEDepKKR+XYOXAVPtWui0ly0NtohUscw+UmaHiAWT8hrV1rr//H6V+0DvJ3OQ19S979M0laLfX8rm82Q== + dependencies: + is-typedarray "^1.0.0" + +typedarray@^0.0.6, typedarray@~0.0.5: + version "0.0.6" + resolved "https://registry.yarnpkg.com/typedarray/-/typedarray-0.0.6.tgz#867ac74e3864187b1d3d47d996a78ec5c8830777" + integrity sha1-hnrHTjhkGHsdPUfZlqeOxciDB3c= + +typescript-compare@^0.0.2: + version "0.0.2" + resolved "https://registry.yarnpkg.com/typescript-compare/-/typescript-compare-0.0.2.tgz#7ee40a400a406c2ea0a7e551efd3309021d5f425" + integrity sha512-8ja4j7pMHkfLJQO2/8tut7ub+J3Lw2S3061eJLFQcvs3tsmJKp8KG5NtpLn7KcY2w08edF74BSVN7qJS0U6oHA== + dependencies: + typescript-logic "^0.0.0" + +typescript-logic@^0.0.0: + version "0.0.0" + resolved "https://registry.yarnpkg.com/typescript-logic/-/typescript-logic-0.0.0.tgz#66ebd82a2548f2b444a43667bec120b496890196" + integrity sha512-zXFars5LUkI3zP492ls0VskH3TtdeHCqu0i7/duGt60i5IGPIpAHE/DWo5FqJ6EjQ15YKXrt+AETjv60Dat34Q== + +typescript-tuple@^2.2.1: + version "2.2.1" + resolved "https://registry.yarnpkg.com/typescript-tuple/-/typescript-tuple-2.2.1.tgz#7d9813fb4b355f69ac55032e0363e8bb0f04dad2" + integrity sha512-Zcr0lbt8z5ZdEzERHAMAniTiIKerFCMgd7yjq1fPnDJ43et/k9twIFQMUYff9k5oXcsQ0WpvFcgzK2ZKASoW6Q== + dependencies: + typescript-compare "^0.0.2" + +ua-parser-js@^0.7.18: + version "0.7.24" + resolved 
"https://registry.yarnpkg.com/ua-parser-js/-/ua-parser-js-0.7.24.tgz#8d3ecea46ed4f1f1d63ec25f17d8568105dc027c" + integrity sha512-yo+miGzQx5gakzVK3QFfN0/L9uVhosXBBO7qmnk7c2iw1IhL212wfA3zbnI54B0obGwC/5NWub/iT9sReMx+Fw== + +ultron@~1.1.0: + version "1.1.1" + resolved "https://registry.yarnpkg.com/ultron/-/ultron-1.1.1.tgz#9fe1536a10a664a65266a1e3ccf85fd36302bc9c" + integrity sha512-UIEXBNeYmKptWH6z8ZnqTeS8fV74zG0/eRU9VGkpzz+LIJNs8W/zM/L+7ctCkRrgbNnnR0xxw4bKOr0cW0N0Og== + +unbox-primitive@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/unbox-primitive/-/unbox-primitive-1.0.0.tgz#eeacbc4affa28e9b3d36b5eaeccc50b3251b1d3f" + integrity sha512-P/51NX+JXyxK/aigg1/ZgyccdAxm5K1+n8+tvqSntjOivPt19gvm1VC49RWYetsiub8WViUchdxl/KWHHB0kzA== + dependencies: + function-bind "^1.1.1" + has-bigints "^1.0.0" + has-symbols "^1.0.0" + which-boxed-primitive "^1.0.1" + +unbox-primitive@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/unbox-primitive/-/unbox-primitive-1.0.1.tgz#085e215625ec3162574dc8859abee78a59b14471" + integrity sha512-tZU/3NqK3dA5gpE1KtyiJUrEB0lxnGkMFHptJ7q6ewdZ8s12QrODwNbhIJStmJkd1QDXa1NRA8aF2A1zk/Ypyw== + dependencies: + function-bind "^1.1.1" + has-bigints "^1.0.1" + has-symbols "^1.0.2" + which-boxed-primitive "^1.0.2" + +underscore@1.9.1: + version "1.9.1" + resolved "https://registry.yarnpkg.com/underscore/-/underscore-1.9.1.tgz#06dce34a0e68a7babc29b365b8e74b8925203961" + integrity sha512-5/4etnCkd9c8gwgowi5/om/mYO5ajCaOgdzj/oW+0eQV9WxKBDZw5+ycmKmeaTXjInS/W0BzpGLo2xR2aBwZdg== + +underscore@^1.8.3: + version "1.12.0" + resolved "https://registry.yarnpkg.com/underscore/-/underscore-1.12.0.tgz#4814940551fc80587cef7840d1ebb0f16453be97" + integrity sha512-21rQzss/XPMjolTiIezSu3JAjgagXKROtNrYFEOWK109qY1Uv2tVjPTZ1ci2HgvQDA16gHYSthQIJfB+XId/rQ== + +unique-by@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/unique-by/-/unique-by-1.0.0.tgz#5220c86ba7bc572fb713ad74651470cb644212bd" + integrity sha512-rJRXK5V0zL6TiSzhoGNpJp5dr+TZBLoPJFC06rLn17Ug++7Aa0Qnve5v+skXeQxx6/sI7rBsSesa6MAcmFi8Ew== + +unique-stream@^2.0.2: + version "2.3.1" + resolved "https://registry.yarnpkg.com/unique-stream/-/unique-stream-2.3.1.tgz#c65d110e9a4adf9a6c5948b28053d9a8d04cbeac" + integrity sha512-2nY4TnBE70yoxHkDli7DMazpWiP7xMdCYqU2nBRO0UB+ZpEkGsSija7MvmvnZFUeC+mrgiUfcHSr3LmRFIg4+A== + dependencies: + json-stable-stringify-without-jsonify "^1.0.1" + through2-filter "^3.0.0" + +unique-string@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/unique-string/-/unique-string-1.0.0.tgz#9e1057cca851abb93398f8b33ae187b99caec11a" + integrity sha1-nhBXzKhRq7kzmPizOuGHuZyuwRo= + dependencies: + crypto-random-string "^1.0.0" + +universalify@^0.1.0, universalify@^0.1.2: + version "0.1.2" + resolved "https://registry.yarnpkg.com/universalify/-/universalify-0.1.2.tgz#b646f69be3942dabcecc9d6639c80dc105efaa66" + integrity sha512-rBJeI5CXAlmy1pV+617WB9J63U6XcazHHF2f2dbJix4XzpUF0RS3Zbj0FGIOCAva5P/d/GBOYaACQ1w+0azUkg== + +universalify@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/universalify/-/universalify-1.0.0.tgz#b61a1da173e8435b2fe3c67d29b9adf8594bd16d" + integrity sha512-rb6X1W158d7pRQBg5gkR8uPaSfiids68LTJQYOtEUhoJUWBdaQHsuT/EUduxXYxcrt4r5PJ4fuHW1MHT6p0qug== + +universalify@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/universalify/-/universalify-2.0.0.tgz#75a4984efedc4b08975c5aeb73f530d02df25717" + integrity sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ== + +unixify@1.0.0: + version 
"1.0.0" + resolved "https://registry.yarnpkg.com/unixify/-/unixify-1.0.0.tgz#3a641c8c2ffbce4da683a5c70f03a462940c2090" + integrity sha1-OmQcjC/7zk2mg6XHDwOkYpQMIJA= + dependencies: + normalize-path "^2.1.1" + +unpipe@1.0.0, unpipe@~1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/unpipe/-/unpipe-1.0.0.tgz#b2bf4ee8514aae6165b4817829d21b2ef49904ec" + integrity sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ== + +upper-case-first@^1.1.0, upper-case-first@^1.1.2: + version "1.1.2" + resolved "https://registry.yarnpkg.com/upper-case-first/-/upper-case-first-1.1.2.tgz#5d79bedcff14419518fd2edb0a0507c9b6859115" + integrity sha1-XXm+3P8UQZUY/S7bCgUHybaFkRU= + dependencies: + upper-case "^1.1.1" + +upper-case@^1.0.3, upper-case@^1.1.0, upper-case@^1.1.1, upper-case@^1.1.3: + version "1.1.3" + resolved "https://registry.yarnpkg.com/upper-case/-/upper-case-1.1.3.tgz#f6b4501c2ec4cdd26ba78be7222961de77621598" + integrity sha1-9rRQHC7EzdJrp4vnIilh3ndiFZg= + +uri-js@^4.2.2: + version "4.4.1" + resolved "https://registry.yarnpkg.com/uri-js/-/uri-js-4.4.1.tgz#9b1a52595225859e55f669d928f88c6c57f2a77e" + integrity sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg== + dependencies: + punycode "^2.1.0" + +urix@^0.1.0: + version "0.1.0" + resolved "https://registry.yarnpkg.com/urix/-/urix-0.1.0.tgz#da937f7a62e21fec1fd18d49b35c2935067a6c72" + integrity sha1-2pN/emLiH+wf0Y1Js1wpNQZ6bHI= + +url-parse-lax@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/url-parse-lax/-/url-parse-lax-1.0.0.tgz#7af8f303645e9bd79a272e7a14ac68bc0609da73" + integrity sha1-evjzA2Rem9eaJy56FKxovAYJ2nM= + dependencies: + prepend-http "^1.0.1" + +url-parse-lax@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/url-parse-lax/-/url-parse-lax-3.0.0.tgz#16b5cafc07dbe3676c1b1999177823d6503acb0c" + integrity sha1-FrXK/Afb42dsGxmZF3gj1lA6yww= + dependencies: + prepend-http "^2.0.0" + +url-parse@^1.4.3: + version "1.5.10" + resolved "https://registry.yarnpkg.com/url-parse/-/url-parse-1.5.10.tgz#9d3c2f736c1d75dd3bd2be507dcc111f1e2ea9c1" + integrity sha512-WypcfiRhfeUP9vvF0j6rw0J3hrWrw6iZv3+22h6iRMJ/8z1Tj6XfLP4DsUix5MhMPnXpiHDoKyoZ/bdCkwBCiQ== + dependencies: + querystringify "^2.1.1" + requires-port "^1.0.0" + +url-set-query@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/url-set-query/-/url-set-query-1.0.0.tgz#016e8cfd7c20ee05cafe7795e892bd0702faa339" + integrity sha1-AW6M/Xwg7gXK/neV6JK9BwL6ozk= + +url-to-options@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/url-to-options/-/url-to-options-1.0.1.tgz#1505a03a289a48cbd7a434efbaeec5055f5633a9" + integrity sha1-FQWgOiiaSMvXpDTvuu7FBV9WM6k= + +ursa-optional@~0.10.0: + version "0.10.2" + resolved "https://registry.yarnpkg.com/ursa-optional/-/ursa-optional-0.10.2.tgz#bd74e7d60289c22ac2a69a3c8dea5eb2817f9681" + integrity sha512-TKdwuLboBn7M34RcvVTuQyhvrA8gYKapuVdm0nBP0mnBc7oECOfUQZrY91cefL3/nm64ZyrejSRrhTVdX7NG/A== + dependencies: + bindings "^1.5.0" + nan "^2.14.2" + +utf-8-validate@^5.0.2: + version "5.0.4" + resolved "https://registry.yarnpkg.com/utf-8-validate/-/utf-8-validate-5.0.4.tgz#72a1735983ddf7a05a43a9c6b67c5ce1c910f9b8" + integrity sha512-MEF05cPSq3AwJ2C7B7sHAA6i53vONoZbMGX8My5auEVm6W+dJ2Jd/TZPyGJ5CH42V2XtbI5FD28HeHeqlPzZ3Q== + dependencies: + node-gyp-build "^4.2.0" + +utf8@3.0.0, utf8@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/utf8/-/utf8-3.0.0.tgz#f052eed1364d696e769ef058b183df88c87f69d1" + 
integrity sha512-E8VjFIQ/TyQgp+TZfS6l8yp/xWppSAHzidGiRrqe4bK4XP9pTRyKFgGJpO3SN7zdX4DeomTrwaseCHovfpFcqQ== + +util-deprecate@^1.0.1, util-deprecate@~1.0.1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/util-deprecate/-/util-deprecate-1.0.2.tgz#450d4dc9fa70de732762fbd2d4a28981419a0ccf" + integrity sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw== + +util.promisify@^1.0.0: + version "1.1.1" + resolved "https://registry.yarnpkg.com/util.promisify/-/util.promisify-1.1.1.tgz#77832f57ced2c9478174149cae9b96e9918cd54b" + integrity sha512-/s3UsZUrIfa6xDhr7zZhnE9SLQ5RIXyYfiVnMMyMDzOc8WhWN4Nbh36H842OyurKbCDAesZOJaVyvmSl6fhGQw== + dependencies: + call-bind "^1.0.0" + define-properties "^1.1.3" + for-each "^0.3.3" + has-symbols "^1.0.1" + object.getownpropertydescriptors "^2.1.1" + +util@^0.12.0: + version "0.12.3" + resolved "https://registry.yarnpkg.com/util/-/util-0.12.3.tgz#971bb0292d2cc0c892dab7c6a5d37c2bec707888" + integrity sha512-I8XkoQwE+fPQEhy9v012V+TSdH2kp9ts29i20TaaDUXsg7x/onePbhFJUExBfv/2ay1ZOp/Vsm3nDlmnFGSAog== + dependencies: + inherits "^2.0.3" + is-arguments "^1.0.4" + is-generator-function "^1.0.7" + is-typed-array "^1.1.3" + safe-buffer "^5.1.2" + which-typed-array "^1.1.2" + +utils-merge@1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/utils-merge/-/utils-merge-1.0.1.tgz#9f95710f50a267947b2ccc124741c1028427e713" + integrity sha512-pMZTvIkT1d+TFGvDOqodOclx0QWkkgi6Tdoa8gC8ffGAAqz9pzPTZWAybbsHHoED/ztMtkv/VoYTYyShUn81hA== + +uuid@2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/uuid/-/uuid-2.0.1.tgz#c2a30dedb3e535d72ccf82e343941a50ba8533ac" + integrity sha1-wqMN7bPlNdcsz4LjQ5QaULqFM6w= + +uuid@3.2.1: + version "3.2.1" + resolved "https://registry.yarnpkg.com/uuid/-/uuid-3.2.1.tgz#12c528bb9d58d0b9265d9a2f6f0fe8be17ff1f14" + integrity sha512-jZnMwlb9Iku/O3smGWvZhauCf6cvvpKi4BKRiliS3cxnI+Gz9j5MEpTz2UFuXiKPJocb7gnsLHwiS05ige5BEA== + +uuid@3.3.2: + version "3.3.2" + resolved "https://registry.yarnpkg.com/uuid/-/uuid-3.3.2.tgz#1b4af4955eb3077c501c23872fc6513811587131" + integrity sha512-yXJmeNaw3DnnKAOKJE51sL/ZaYfWJRl1pK9dr19YFCu0ObS231AB1/LbqTKRAQ5kw8A90rA6fr4riOUpTZvQZA== + +uuid@8.1.0: + version "8.1.0" + resolved "https://registry.yarnpkg.com/uuid/-/uuid-8.1.0.tgz#6f1536eb43249f473abc6bd58ff983da1ca30d8d" + integrity sha512-CI18flHDznR0lq54xBycOVmphdCYnQLKn8abKn7PXUiKUGdEd+/l9LWNJmugXel4hXq7S+RMNl34ecyC9TntWg== + +uuid@^3.1.0, uuid@^3.3.2: + version "3.4.0" + resolved "https://registry.yarnpkg.com/uuid/-/uuid-3.4.0.tgz#b23e4358afa8a202fe7a100af1f5f883f02007ee" + integrity sha512-HjSDRw6gZE5JMggctHBcjVak08+KEVhSIiDzFnT9S9aegmp85S/bReBVTb4QTFaRNptJ9kuYaNhnbNEOkbKb/A== + +uuid@^8.0.0, uuid@^8.3.2: + version "8.3.2" + resolved "https://registry.yarnpkg.com/uuid/-/uuid-8.3.2.tgz#80d5b5ced271bb9af6c445f21a1a04c606cefbe2" + integrity sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg== + +vali-date@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/vali-date/-/vali-date-1.0.0.tgz#1b904a59609fb328ef078138420934f6b86709a6" + integrity sha1-G5BKWWCfsyjvB4E4Qgk09rhnCaY= + +valid-url@1.0.9: + version "1.0.9" + resolved "https://registry.yarnpkg.com/valid-url/-/valid-url-1.0.9.tgz#1c14479b40f1397a75782f115e4086447433a200" + integrity sha1-HBRHm0DxOXp1eC8RXkCGRHQzogA= + +validate-npm-package-license@^3.0.1: + version "3.0.4" + resolved 
"https://registry.yarnpkg.com/validate-npm-package-license/-/validate-npm-package-license-3.0.4.tgz#fc91f6b9c7ba15c857f4cb2c5defeec39d4f410a" + integrity sha512-DpKm2Ui/xN7/HQKCtpZxoRWBhZ9Z0kqtygG8XCgNQ8ZlDnxuQmWhj566j8fN4Cu3/JmbhsDo7fcAJq4s9h27Ew== + dependencies: + spdx-correct "^3.0.0" + spdx-expression-parse "^3.0.0" + +varint@^5.0.0, varint@~5.0.0: + version "5.0.2" + resolved "https://registry.yarnpkg.com/varint/-/varint-5.0.2.tgz#5b47f8a947eb668b848e034dcfa87d0ff8a7f7a4" + integrity sha512-lKxKYG6H03yCZUpAGOPOsMcGxd1RHCu1iKvEHYDPmTyq2HueGhD73ssNBqqQWfvYs04G9iUFRvmAVLW20Jw6ow== + +vary@^1, vary@~1.1.2: + version "1.1.2" + resolved "https://registry.yarnpkg.com/vary/-/vary-1.1.2.tgz#2299f02c6ded30d4a5961b0b9f74524a18f634fc" + integrity sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg== + +verror@1.10.0: + version "1.10.0" + resolved "https://registry.yarnpkg.com/verror/-/verror-1.10.0.tgz#3a105ca17053af55d6e270c1f8288682e18da400" + integrity sha512-ZZKSmDAEFOijERBLkmYfJ+vmk3w+7hOLYDNkRCuRuMJGEmqYNCNLyBBFwWKVMhfwaEF3WOd0Zlw86U/WC/+nYw== + dependencies: + assert-plus "^1.0.0" + core-util-is "1.0.2" + extsprintf "^1.2.0" + +vinyl-fs@2.4.3: + version "2.4.3" + resolved "https://registry.yarnpkg.com/vinyl-fs/-/vinyl-fs-2.4.3.tgz#3d97e562ebfdd4b66921dea70626b84bde9d2d07" + integrity sha1-PZflYuv91LZpId6nBia4S96dLQc= + dependencies: + duplexify "^3.2.0" + glob-stream "^5.3.2" + graceful-fs "^4.0.0" + gulp-sourcemaps "^1.5.2" + is-valid-glob "^0.3.0" + lazystream "^1.0.0" + lodash.isequal "^4.0.0" + merge-stream "^1.0.0" + mkdirp "^0.5.0" + object-assign "^4.0.0" + readable-stream "^2.0.4" + strip-bom "^2.0.0" + strip-bom-stream "^1.0.0" + through2 "^2.0.0" + through2-filter "^2.0.0" + vali-date "^1.0.0" + vinyl "^1.0.0" + +vinyl@1.X, vinyl@^1.0.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/vinyl/-/vinyl-1.2.0.tgz#5c88036cf565e5df05558bfc911f8656df218884" + integrity sha1-XIgDbPVl5d8FVYv8kR+GVt8hiIQ= + dependencies: + clone "^1.0.0" + clone-stats "^0.0.1" + replace-ext "0.0.1" + +vuvuzela@1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/vuvuzela/-/vuvuzela-1.0.3.tgz#3be145e58271c73ca55279dd851f12a682114b0b" + integrity sha1-O+FF5YJxxzylUnndhR8SpoIRSws= + +wcwidth@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/wcwidth/-/wcwidth-1.0.1.tgz#f0b0dcf915bc5ff1528afadb2c0e17b532da2fe8" + integrity sha512-XHPEwS0q6TaxcvG85+8EYkbiCux2XtWG2mkc47Ng2A77BQu9+DqIOJldST4HgPkuea7dvKSj5VgX3P1d4rW8Tg== + dependencies: + defaults "^1.0.3" + +web3-bzz@1.2.9: + version "1.2.9" + resolved "https://registry.yarnpkg.com/web3-bzz/-/web3-bzz-1.2.9.tgz#25f8a373bc2dd019f47bf80523546f98b93c8790" + integrity sha512-ogVQr9jHodu9HobARtvUSmWG22cv2EUQzlPeejGWZ7j5h20HX40EDuWyomGY5VclIj5DdLY76Tmq88RTf/6nxA== + dependencies: + "@types/node" "^10.12.18" + got "9.6.0" + swarm-js "^0.1.40" + underscore "1.9.1" + +web3-bzz@1.3.4: + version "1.3.4" + resolved "https://registry.yarnpkg.com/web3-bzz/-/web3-bzz-1.3.4.tgz#9be529353c4063bc68395370cb5d8e414c6b6c87" + integrity sha512-DBRVQB8FAgoAtZCpp2GAGPCJjgBgsuwOKEasjV044AAZiONpXcKHbkO6G1SgItIixnrJsRJpoGLGw52Byr6FKw== + dependencies: + "@types/node" "^12.12.6" + got "9.6.0" + swarm-js "^0.1.40" + underscore "1.9.1" + +web3-core-helpers@1.2.9: + version "1.2.9" + resolved "https://registry.yarnpkg.com/web3-core-helpers/-/web3-core-helpers-1.2.9.tgz#6381077c3e01c127018cb9e9e3d1422697123315" + integrity 
sha512-t0WAG3orLCE3lqi77ZoSRNFok3VQWZXTniZigDQjyOJYMAX7BU3F3js8HKbjVnAxlX3tiKoDxI0KBk9F3AxYuw== + dependencies: + underscore "1.9.1" + web3-eth-iban "1.2.9" + web3-utils "1.2.9" + +web3-core-helpers@1.3.4: + version "1.3.4" + resolved "https://registry.yarnpkg.com/web3-core-helpers/-/web3-core-helpers-1.3.4.tgz#b8549740bf24d5c71688d89c3cdd802d8d36b4e4" + integrity sha512-n7BqDalcTa1stncHMmrnFtyTgDhX5Fy+avNaHCf6qcOP2lwTQC8+mdHVBONWRJ6Yddvln+c8oY/TAaB6PzWK0A== + dependencies: + underscore "1.9.1" + web3-eth-iban "1.3.4" + web3-utils "1.3.4" + +web3-core-method@1.2.9: + version "1.2.9" + resolved "https://registry.yarnpkg.com/web3-core-method/-/web3-core-method-1.2.9.tgz#3fb538751029bea570e4f86731e2fa5e4945e462" + integrity sha512-bjsIoqP3gs7A/gP8+QeLUCyOKJ8bopteCSNbCX36Pxk6TYfYWNuC6hP+2GzUuqdP3xaZNe+XEElQFUNpR3oyAg== + dependencies: + "@ethersproject/transactions" "^5.0.0-beta.135" + underscore "1.9.1" + web3-core-helpers "1.2.9" + web3-core-promievent "1.2.9" + web3-core-subscriptions "1.2.9" + web3-utils "1.2.9" + +web3-core-method@1.3.4: + version "1.3.4" + resolved "https://registry.yarnpkg.com/web3-core-method/-/web3-core-method-1.3.4.tgz#6c2812d96dd6c811b9e6c8a5d25050d2c22b9527" + integrity sha512-JxmQrujsAWYRRN77P/RY7XuZDCzxSiiQJrgX/60Lfyf7FF1Y0le4L/UMCi7vUJnuYkbU1Kfl9E0udnqwyPqlvQ== + dependencies: + "@ethersproject/transactions" "^5.0.0-beta.135" + underscore "1.9.1" + web3-core-helpers "1.3.4" + web3-core-promievent "1.3.4" + web3-core-subscriptions "1.3.4" + web3-utils "1.3.4" + +web3-core-promievent@1.2.9: + version "1.2.9" + resolved "https://registry.yarnpkg.com/web3-core-promievent/-/web3-core-promievent-1.2.9.tgz#bb1c56aa6fac2f4b3c598510f06554d25c11c553" + integrity sha512-0eAUA2zjgXTleSrnc1wdoKQPPIHU6KHf4fAscu4W9kKrR+mqP1KsjYrxY9wUyjNnXxfQ+5M29ipvbiaK8OqdOw== + dependencies: + eventemitter3 "3.1.2" + +web3-core-promievent@1.3.4: + version "1.3.4" + resolved "https://registry.yarnpkg.com/web3-core-promievent/-/web3-core-promievent-1.3.4.tgz#d166239012d91496cdcbe91d5d54071ea818bc73" + integrity sha512-V61dZIeBwogg6hhZZUt0qL9hTp1WDhnsdjP++9fhTDr4vy/Gz8T5vibqT2LLg6lQC8i+Py33yOpMeMNjztaUaw== + dependencies: + eventemitter3 "4.0.4" + +web3-core-requestmanager@1.2.9: + version "1.2.9" + resolved "https://registry.yarnpkg.com/web3-core-requestmanager/-/web3-core-requestmanager-1.2.9.tgz#dd6d855256c4dd681434fe0867f8cd742fe10503" + integrity sha512-1PwKV2m46ALUnIN5VPPgjOj8yMLJhhqZYvYJE34hTN5SErOkwhzx5zScvo5MN7v7KyQGFnpVCZKKGCiEnDmtFA== + dependencies: + underscore "1.9.1" + web3-core-helpers "1.2.9" + web3-providers-http "1.2.9" + web3-providers-ipc "1.2.9" + web3-providers-ws "1.2.9" + +web3-core-requestmanager@1.3.4: + version "1.3.4" + resolved "https://registry.yarnpkg.com/web3-core-requestmanager/-/web3-core-requestmanager-1.3.4.tgz#e105ced735c2b5fcedd5771e0ecf9879ae9c373f" + integrity sha512-xriouCrhVnVDYQ04TZXdEREZm0OOJzkSEsoN5bu4JYsA6e/HzROeU+RjDpMUxFMzN4wxmFZ+HWbpPndS3QwMag== + dependencies: + underscore "1.9.1" + util "^0.12.0" + web3-core-helpers "1.3.4" + web3-providers-http "1.3.4" + web3-providers-ipc "1.3.4" + web3-providers-ws "1.3.4" + +web3-core-subscriptions@1.2.9: + version "1.2.9" + resolved "https://registry.yarnpkg.com/web3-core-subscriptions/-/web3-core-subscriptions-1.2.9.tgz#335fd7d15dfce5d78b4b7bef05ce4b3d7237b0e4" + integrity sha512-Y48TvXPSPxEM33OmXjGVDMzTd0j8X0t2+sDw66haeBS8eYnrEzasWuBZZXDq0zNUsqyxItgBGDn+cszkgEnFqg== + dependencies: + eventemitter3 "3.1.2" + underscore "1.9.1" + web3-core-helpers "1.2.9" + +web3-core-subscriptions@1.3.4: + version 
"1.3.4" + resolved "https://registry.yarnpkg.com/web3-core-subscriptions/-/web3-core-subscriptions-1.3.4.tgz#7b00e92bde21f792620cd02e6e508fcf4f4c31d3" + integrity sha512-drVHVDxh54hv7xmjIm44g4IXjfGj022fGw4/meB5R2D8UATFI40F73CdiBlyqk3DysP9njDOLTJFSQvEkLFUOg== + dependencies: + eventemitter3 "4.0.4" + underscore "1.9.1" + web3-core-helpers "1.3.4" + +web3-core@1.2.9: + version "1.2.9" + resolved "https://registry.yarnpkg.com/web3-core/-/web3-core-1.2.9.tgz#2cba57aa259b6409db532d21bdf57db8d504fd3e" + integrity sha512-fSYv21IP658Ty2wAuU9iqmW7V+75DOYMVZsDH/c14jcF/1VXnedOcxzxSj3vArsCvXZNe6XC5/wAuGZyQwR9RA== + dependencies: + "@types/bn.js" "^4.11.4" + "@types/node" "^12.6.1" + bignumber.js "^9.0.0" + web3-core-helpers "1.2.9" + web3-core-method "1.2.9" + web3-core-requestmanager "1.2.9" + web3-utils "1.2.9" + +web3-core@1.3.4: + version "1.3.4" + resolved "https://registry.yarnpkg.com/web3-core/-/web3-core-1.3.4.tgz#2cc7ba7f35cc167f7a0a46fd5855f86e51d34ce8" + integrity sha512-7OJu46RpCEfTerl+gPvHXANR2RkLqAfW7l2DAvQ7wN0pnCzl9nEfdgW6tMhr31k3TR2fWucwKzCyyxMGzMHeSA== + dependencies: + "@types/bn.js" "^4.11.5" + "@types/node" "^12.12.6" + bignumber.js "^9.0.0" + web3-core-helpers "1.3.4" + web3-core-method "1.3.4" + web3-core-requestmanager "1.3.4" + web3-utils "1.3.4" + +web3-eth-abi@1.2.9: + version "1.2.9" + resolved "https://registry.yarnpkg.com/web3-eth-abi/-/web3-eth-abi-1.2.9.tgz#14bedd7e4be04fcca35b2ac84af1400574cd8280" + integrity sha512-3YwUYbh/DMfDbhMWEebAdjSd5bj3ZQieOjLzWFHU23CaLEqT34sUix1lba+hgUH/EN6A7bKAuKOhR3p0OvTn7Q== + dependencies: + "@ethersproject/abi" "5.0.0-beta.153" + underscore "1.9.1" + web3-utils "1.2.9" + +web3-eth-abi@1.3.4: + version "1.3.4" + resolved "https://registry.yarnpkg.com/web3-eth-abi/-/web3-eth-abi-1.3.4.tgz#10f5d8b6080dbb6cbaa1bcef7e0c70573da6566f" + integrity sha512-PVSLXJ2dzdXsC+R24llIIEOS6S1KhG5qwNznJjJvXZFe3sqgdSe47eNvwUamZtCBjcrdR/HQr+L/FTxqJSf80Q== + dependencies: + "@ethersproject/abi" "5.0.7" + underscore "1.9.1" + web3-utils "1.3.4" + +web3-eth-abi@1.7.0: + version "1.7.0" + resolved "https://registry.yarnpkg.com/web3-eth-abi/-/web3-eth-abi-1.7.0.tgz#4fac9c7d9e5a62b57f8884b37371f515c766f3f4" + integrity sha512-heqR0bWxgCJwjWIhq2sGyNj9bwun5+Xox/LdZKe+WMyTSy0cXDXEAgv3XKNkXC4JqdDt/ZlbTEx4TWak4TRMSg== + dependencies: + "@ethersproject/abi" "5.0.7" + web3-utils "1.7.0" + +web3-eth-accounts@1.2.9: + version "1.2.9" + resolved "https://registry.yarnpkg.com/web3-eth-accounts/-/web3-eth-accounts-1.2.9.tgz#7ec422df90fecb5243603ea49dc28726db7bdab6" + integrity sha512-jkbDCZoA1qv53mFcRHCinoCsgg8WH+M0YUO1awxmqWXRmCRws1wW0TsuSQ14UThih5Dxolgl+e+aGWxG58LMwg== + dependencies: + crypto-browserify "3.12.0" + eth-lib "^0.2.8" + ethereumjs-common "^1.3.2" + ethereumjs-tx "^2.1.1" + scrypt-js "^3.0.1" + underscore "1.9.1" + uuid "3.3.2" + web3-core "1.2.9" + web3-core-helpers "1.2.9" + web3-core-method "1.2.9" + web3-utils "1.2.9" + +web3-eth-accounts@1.3.4: + version "1.3.4" + resolved "https://registry.yarnpkg.com/web3-eth-accounts/-/web3-eth-accounts-1.3.4.tgz#cf513d78531c13ce079a5e7862820570350e79a5" + integrity sha512-gz9ReSmQEjqbYAjpmAx+UZF4CVMbyS4pfjSYWGAnNNI+Xz0f0u0kCIYXQ1UEaE+YeLcYiE+ZlZdgg6YoatO5nA== + dependencies: + crypto-browserify "3.12.0" + eth-lib "0.2.8" + ethereumjs-common "^1.3.2" + ethereumjs-tx "^2.1.1" + scrypt-js "^3.0.1" + underscore "1.9.1" + uuid "3.3.2" + web3-core "1.3.4" + web3-core-helpers "1.3.4" + web3-core-method "1.3.4" + web3-utils "1.3.4" + +web3-eth-contract@1.2.9: + version "1.2.9" + resolved 
"https://registry.yarnpkg.com/web3-eth-contract/-/web3-eth-contract-1.2.9.tgz#713d9c6d502d8c8f22b696b7ffd8e254444e6bfd" + integrity sha512-PYMvJf7EG/HyssUZa+pXrc8IB06K/YFfWYyW4R7ed3sab+9wWUys1TlWxBCBuiBXOokSAyM6H6P6/cKEx8FT8Q== + dependencies: + "@types/bn.js" "^4.11.4" + underscore "1.9.1" + web3-core "1.2.9" + web3-core-helpers "1.2.9" + web3-core-method "1.2.9" + web3-core-promievent "1.2.9" + web3-core-subscriptions "1.2.9" + web3-eth-abi "1.2.9" + web3-utils "1.2.9" + +web3-eth-contract@1.3.4: + version "1.3.4" + resolved "https://registry.yarnpkg.com/web3-eth-contract/-/web3-eth-contract-1.3.4.tgz#1ea2dd71be0c4a9cf4772d4f75dbb2fa99751472" + integrity sha512-Fvy8ZxUksQY2ePt+XynFfOiSqxgQtMn4m2NJs6VXRl2Inl17qyRi/nIJJVKTcENLocm+GmZ/mxq2eOE5u02nPg== + dependencies: + "@types/bn.js" "^4.11.5" + underscore "1.9.1" + web3-core "1.3.4" + web3-core-helpers "1.3.4" + web3-core-method "1.3.4" + web3-core-promievent "1.3.4" + web3-core-subscriptions "1.3.4" + web3-eth-abi "1.3.4" + web3-utils "1.3.4" + +web3-eth-ens@1.2.9: + version "1.2.9" + resolved "https://registry.yarnpkg.com/web3-eth-ens/-/web3-eth-ens-1.2.9.tgz#577b9358c036337833fb2bdc59c11be7f6f731b6" + integrity sha512-kG4+ZRgZ8I1WYyOBGI8QVRHfUSbbJjvJAGA1AF/NOW7JXQ+x7gBGeJw6taDWJhSshMoEKWcsgvsiuoG4870YxQ== + dependencies: + content-hash "^2.5.2" + eth-ens-namehash "2.0.8" + underscore "1.9.1" + web3-core "1.2.9" + web3-core-helpers "1.2.9" + web3-core-promievent "1.2.9" + web3-eth-abi "1.2.9" + web3-eth-contract "1.2.9" + web3-utils "1.2.9" + +web3-eth-ens@1.3.4: + version "1.3.4" + resolved "https://registry.yarnpkg.com/web3-eth-ens/-/web3-eth-ens-1.3.4.tgz#a7e4bb18481fb0e2ce5bfb3b3da2fbb0ad78cefe" + integrity sha512-b0580tQyQwpV2wyacwQiBEfQmjCUln5iPhge3IBIMXaI43BUNtH3lsCL9ERFQeOdweB4o+6rYyNYr6xbRcSytg== + dependencies: + content-hash "^2.5.2" + eth-ens-namehash "2.0.8" + underscore "1.9.1" + web3-core "1.3.4" + web3-core-helpers "1.3.4" + web3-core-promievent "1.3.4" + web3-eth-abi "1.3.4" + web3-eth-contract "1.3.4" + web3-utils "1.3.4" + +web3-eth-iban@1.2.9: + version "1.2.9" + resolved "https://registry.yarnpkg.com/web3-eth-iban/-/web3-eth-iban-1.2.9.tgz#4ebf3d8783f34d04c4740dc18938556466399f7a" + integrity sha512-RtdVvJE0pyg9dHLy0GzDiqgnLnssSzfz/JYguhC1wsj9+Gnq1M6Diy3NixACWUAp6ty/zafyOaZnNQ+JuH9TjQ== + dependencies: + bn.js "4.11.8" + web3-utils "1.2.9" + +web3-eth-iban@1.3.4: + version "1.3.4" + resolved "https://registry.yarnpkg.com/web3-eth-iban/-/web3-eth-iban-1.3.4.tgz#5eb7a564e0dcf68730d68f48f95dd207cd173d81" + integrity sha512-Y7/hLjVvIN/OhaAyZ8L/hxbTqVX6AFTl2RwUXR6EEU9oaLydPcMjAx/Fr8mghUvQS3QJSr+UGubP3W4SkyNiYw== + dependencies: + bn.js "^4.11.9" + web3-utils "1.3.4" + +web3-eth-personal@1.2.9: + version "1.2.9" + resolved "https://registry.yarnpkg.com/web3-eth-personal/-/web3-eth-personal-1.2.9.tgz#9b95eb159b950b83cd8ae15873e1d57711b7a368" + integrity sha512-cFiNrktxZ1C/rIdJFzQTvFn3/0zcsR3a+Jf8Y3KxeQDHszQtosjLWptP7bsUmDwEh4hzh0Cy3KpOxlYBWB8bJQ== + dependencies: + "@types/node" "^12.6.1" + web3-core "1.2.9" + web3-core-helpers "1.2.9" + web3-core-method "1.2.9" + web3-net "1.2.9" + web3-utils "1.2.9" + +web3-eth-personal@1.3.4: + version "1.3.4" + resolved "https://registry.yarnpkg.com/web3-eth-personal/-/web3-eth-personal-1.3.4.tgz#0d0e0abea3447283d7ee5658ed312990c9bf48dd" + integrity sha512-JiTbaktYVk1j+S2EDooXAhw5j/VsdvZfKRmHtXUe/HizPM9ETXmj1+ne4RT6m+950jQ7DJwUF3XU1FKYNtEDwQ== + dependencies: + "@types/node" "^12.12.6" + web3-core "1.3.4" + web3-core-helpers "1.3.4" + web3-core-method "1.3.4" + web3-net "1.3.4" + 
web3-utils "1.3.4" + +web3-eth@1.2.9: + version "1.2.9" + resolved "https://registry.yarnpkg.com/web3-eth/-/web3-eth-1.2.9.tgz#e40e7b88baffc9b487193211c8b424dc944977b3" + integrity sha512-sIKO4iE9FEBa/CYUd6GdPd7GXt/wISqxUd8PlIld6+hvMJj02lgO7Z7p5T9mZIJcIZJGvZX81ogx8oJ9yif+Ag== + dependencies: + underscore "1.9.1" + web3-core "1.2.9" + web3-core-helpers "1.2.9" + web3-core-method "1.2.9" + web3-core-subscriptions "1.2.9" + web3-eth-abi "1.2.9" + web3-eth-accounts "1.2.9" + web3-eth-contract "1.2.9" + web3-eth-ens "1.2.9" + web3-eth-iban "1.2.9" + web3-eth-personal "1.2.9" + web3-net "1.2.9" + web3-utils "1.2.9" + +web3-eth@1.3.4: + version "1.3.4" + resolved "https://registry.yarnpkg.com/web3-eth/-/web3-eth-1.3.4.tgz#7c4607685e66a1c43e3e315e526c959f24f96907" + integrity sha512-8OIVMLbvmx+LB5RZ4tDhXuFGWSdNMrCZ4HM0+PywQ08uEcmAcqTMFAn4vdPii+J8gCatZR501r1KdzX3SDLoPw== + dependencies: + underscore "1.9.1" + web3-core "1.3.4" + web3-core-helpers "1.3.4" + web3-core-method "1.3.4" + web3-core-subscriptions "1.3.4" + web3-eth-abi "1.3.4" + web3-eth-accounts "1.3.4" + web3-eth-contract "1.3.4" + web3-eth-ens "1.3.4" + web3-eth-iban "1.3.4" + web3-eth-personal "1.3.4" + web3-net "1.3.4" + web3-utils "1.3.4" + +web3-net@1.2.9: + version "1.2.9" + resolved "https://registry.yarnpkg.com/web3-net/-/web3-net-1.2.9.tgz#51d248ed1bc5c37713c4ac40c0073d9beacd87d3" + integrity sha512-d2mTn8jPlg+SI2hTj2b32Qan6DmtU9ap/IUlJTeQbZQSkTLf0u9suW8Vjwyr4poJYXTurdSshE7OZsPNn30/ZA== + dependencies: + web3-core "1.2.9" + web3-core-method "1.2.9" + web3-utils "1.2.9" + +web3-net@1.3.4: + version "1.3.4" + resolved "https://registry.yarnpkg.com/web3-net/-/web3-net-1.3.4.tgz#d76158bf0b4a7b3b14352b4f95472db9efc57a2a" + integrity sha512-wVyqgVC3Zt/0uGnBiR3GpnsS8lvOFTDgWZMxAk9C6Guh8aJD9MUc7pbsw5rHrPUVe6S6RUfFJvh/Xq8oMIQgSw== + dependencies: + web3-core "1.3.4" + web3-core-method "1.3.4" + web3-utils "1.3.4" + +web3-providers-http@1.2.9: + version "1.2.9" + resolved "https://registry.yarnpkg.com/web3-providers-http/-/web3-providers-http-1.2.9.tgz#e698aa5377e2019c24c5a1e6efa0f51018728934" + integrity sha512-F956tCIj60Ttr0UvEHWFIhx+be3He8msoPzyA44/kfzzYoMAsCFRn5cf0zQG6al0znE75g6HlWVSN6s3yAh51A== + dependencies: + web3-core-helpers "1.2.9" + xhr2-cookies "1.1.0" + +web3-providers-http@1.3.4: + version "1.3.4" + resolved "https://registry.yarnpkg.com/web3-providers-http/-/web3-providers-http-1.3.4.tgz#89389e18e27148faa2fef58842740ffadbdda8cc" + integrity sha512-aIg/xHXvxpqpFU70sqfp+JC3sGkLfAimRKTUhG4oJZ7U+tTcYTHoxBJj+4A3Id4JAoKiiv0k1/qeyQ8f3rMC3g== + dependencies: + web3-core-helpers "1.3.4" + xhr2-cookies "1.1.0" + +web3-providers-ipc@1.2.9: + version "1.2.9" + resolved "https://registry.yarnpkg.com/web3-providers-ipc/-/web3-providers-ipc-1.2.9.tgz#6159eacfcd7ac31edc470d93ef10814fe874763b" + integrity sha512-NQ8QnBleoHA2qTJlqoWu7EJAD/FR5uimf7Ielzk4Z2z+m+6UAuJdJMSuQNj+Umhz9L/Ys6vpS1vHx9NizFl+aQ== + dependencies: + oboe "2.1.4" + underscore "1.9.1" + web3-core-helpers "1.2.9" + +web3-providers-ipc@1.3.4: + version "1.3.4" + resolved "https://registry.yarnpkg.com/web3-providers-ipc/-/web3-providers-ipc-1.3.4.tgz#b963518989b1b7847063cdd461ff73b83855834a" + integrity sha512-E0CvXEJElr/TIlG1YfJeO3Le5NI/4JZM+1SsEdiPIfBUAJN18oOoum138EBGKv5+YaLKZUtUuJSXWjIIOR/0Ig== + dependencies: + oboe "2.1.5" + underscore "1.9.1" + web3-core-helpers "1.3.4" + +web3-providers-ws@1.2.9: + version "1.2.9" + resolved "https://registry.yarnpkg.com/web3-providers-ws/-/web3-providers-ws-1.2.9.tgz#22c2006655ec44b4ad2b41acae62741a6ae7a88c" + integrity 
sha512-6+UpvINeI//dglZoAKStUXqxDOXJy6Iitv2z3dbgInG4zb8tkYl/VBDL80UjUg3ZvzWG0g7EKY2nRPEpON2TFA== + dependencies: + eventemitter3 "^4.0.0" + underscore "1.9.1" + web3-core-helpers "1.2.9" + websocket "^1.0.31" + +web3-providers-ws@1.3.4: + version "1.3.4" + resolved "https://registry.yarnpkg.com/web3-providers-ws/-/web3-providers-ws-1.3.4.tgz#b94c2e0ec51a0c472abdec53a472b5bf8176bec1" + integrity sha512-WBd9hk2fUAdrbA3kUyUk94ZeILtE6txLeoVVvIKAw2bPegx+RjkLyxC1Du0oceKgQ/qQWod8CCzl1E/GgTP+MQ== + dependencies: + eventemitter3 "4.0.4" + underscore "1.9.1" + web3-core-helpers "1.3.4" + websocket "^1.0.32" + +web3-shh@1.2.9: + version "1.2.9" + resolved "https://registry.yarnpkg.com/web3-shh/-/web3-shh-1.2.9.tgz#c4ba70d6142cfd61341a50752d8cace9a0370911" + integrity sha512-PWa8b/EaxaMinFaxy6cV0i0EOi2M7a/ST+9k9nhyhCjVa2vzXuNoBNo2IUOmeZ0WP2UQB8ByJ2+p4htlJaDOjA== + dependencies: + web3-core "1.2.9" + web3-core-method "1.2.9" + web3-core-subscriptions "1.2.9" + web3-net "1.2.9" + +web3-shh@1.3.4: + version "1.3.4" + resolved "https://registry.yarnpkg.com/web3-shh/-/web3-shh-1.3.4.tgz#b7d29e118f26416c1a74575e585be379cc01a77a" + integrity sha512-zoeww5mxLh3xKcqbX85irQbtFe5pc5XwrgjvmdMkhkOdZzPASlWOgqzUFtaPykpLwC3yavVx4jG5RqifweXLUA== + dependencies: + web3-core "1.3.4" + web3-core-method "1.3.4" + web3-core-subscriptions "1.3.4" + web3-net "1.3.4" + +web3-utils@1.2.9: + version "1.2.9" + resolved "https://registry.yarnpkg.com/web3-utils/-/web3-utils-1.2.9.tgz#abe11735221627da943971ef1a630868fb9c61f3" + integrity sha512-9hcpuis3n/LxFzEVjwnVgvJzTirS2S9/MiNAa7l4WOEoywY+BSNwnRX4MuHnjkh9NY25B6QOjuNG6FNnSjTw1w== + dependencies: + bn.js "4.11.8" + eth-lib "0.2.7" + ethereum-bloom-filters "^1.0.6" + ethjs-unit "0.1.6" + number-to-bn "1.7.0" + randombytes "^2.1.0" + underscore "1.9.1" + utf8 "3.0.0" + +web3-utils@1.3.4: + version "1.3.4" + resolved "https://registry.yarnpkg.com/web3-utils/-/web3-utils-1.3.4.tgz#9b1aa30d7549f860b573e7bb7e690999e7192198" + integrity sha512-/vC2v0MaZNpWooJfpRw63u0Y3ag2gNjAWiLtMSL6QQLmCqCy4SQIndMt/vRyx0uMoeGt1YTwSXEcHjUzOhLg0A== + dependencies: + bn.js "^4.11.9" + eth-lib "0.2.8" + ethereum-bloom-filters "^1.0.6" + ethjs-unit "0.1.6" + number-to-bn "1.7.0" + randombytes "^2.1.0" + underscore "1.9.1" + utf8 "3.0.0" + +web3-utils@1.7.0: + version "1.7.0" + resolved "https://registry.yarnpkg.com/web3-utils/-/web3-utils-1.7.0.tgz#c59f0fd43b2449357296eb54541810b99b1c771c" + integrity sha512-O8Tl4Ky40Sp6pe89Olk2FsaUkgHyb5QAXuaKo38ms3CxZZ4d3rPGfjP9DNKGm5+IUgAZBNpF1VmlSmNCqfDI1w== + dependencies: + bn.js "^4.11.9" + ethereum-bloom-filters "^1.0.6" + ethereumjs-util "^7.1.0" + ethjs-unit "0.1.6" + number-to-bn "1.7.0" + randombytes "^2.1.0" + utf8 "3.0.0" + +web3@1.2.9: + version "1.2.9" + resolved "https://registry.yarnpkg.com/web3/-/web3-1.2.9.tgz#cbcf1c0fba5e213a6dfb1f2c1f4b37062e4ce337" + integrity sha512-Mo5aBRm0JrcNpN/g4VOrDzudymfOnHRC3s2VarhYxRA8aWgF5rnhQ0ziySaugpic1gksbXPe105pUWyRqw8HUA== + dependencies: + web3-bzz "1.2.9" + web3-core "1.2.9" + web3-eth "1.2.9" + web3-eth-personal "1.2.9" + web3-net "1.2.9" + web3-shh "1.2.9" + web3-utils "1.2.9" + +web3@^1.0.0-beta.34: + version "1.3.4" + resolved "https://registry.yarnpkg.com/web3/-/web3-1.3.4.tgz#31e014873360aa5840eb17f9f171190c967cffb7" + integrity sha512-D6cMb2EtTMLHgdGbkTPGl/Qi7DAfczR+Lp7iFX3bcu/bsD9V8fZW69hA8v5cRPNGzXUwVQebk3bS17WKR4cD2w== + dependencies: + web3-bzz "1.3.4" + web3-core "1.3.4" + web3-eth "1.3.4" + web3-eth-personal "1.3.4" + web3-net "1.3.4" + web3-shh "1.3.4" + web3-utils "1.3.4" + +webidl-conversions@^2.0.0: + 
version "2.0.1" + resolved "https://registry.yarnpkg.com/webidl-conversions/-/webidl-conversions-2.0.1.tgz#3bf8258f7d318c7443c36f2e169402a1a6703506" + integrity sha1-O/glj30xjHRDw28uFpQCoaZwNQY= + +webidl-conversions@^3.0.0: + version "3.0.1" + resolved "https://registry.yarnpkg.com/webidl-conversions/-/webidl-conversions-3.0.1.tgz#24534275e2a7bc6be7bc86611cc16ae0a5654871" + integrity sha1-JFNCdeKnvGvnvIZhHMFq4KVlSHE= + +websocket@^1.0.31, websocket@^1.0.32: + version "1.0.33" + resolved "https://registry.yarnpkg.com/websocket/-/websocket-1.0.33.tgz#407f763fc58e74a3fa41ca3ae5d78d3f5e3b82a5" + integrity sha512-XwNqM2rN5eh3G2CUQE3OHZj+0xfdH42+OFK6LdC2yqiC0YU8e5UK0nYre220T0IyyN031V/XOvtHvXozvJYFWA== + dependencies: + bufferutil "^4.0.1" + debug "^2.2.0" + es5-ext "^0.10.50" + typedarray-to-buffer "^3.1.5" + utf-8-validate "^5.0.2" + yaeti "^0.0.6" + +websql@1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/websql/-/websql-1.0.0.tgz#1bd00b27392893134715d5dd6941fd89e730bab5" + integrity sha512-7iZ+u28Ljw5hCnMiq0BCOeSYf0vCFQe/ORY0HgscTiKjQed8WqugpBUggJ2NTnB9fahn1kEnPRX2jf8Px5PhJw== + dependencies: + argsarray "^0.0.1" + immediate "^3.2.2" + noop-fn "^1.0.0" + sqlite3 "^4.0.0" + tiny-queue "^0.2.1" + +whatwg-fetch@2.0.3: + version "2.0.3" + resolved "https://registry.yarnpkg.com/whatwg-fetch/-/whatwg-fetch-2.0.3.tgz#9c84ec2dcf68187ff00bc64e1274b442176e1c84" + integrity sha1-nITsLc9oGH/wC8ZOEnS0QhduHIQ= + +whatwg-fetch@2.0.4: + version "2.0.4" + resolved "https://registry.yarnpkg.com/whatwg-fetch/-/whatwg-fetch-2.0.4.tgz#dde6a5df315f9d39991aa17621853d720b85566f" + integrity sha512-dcQ1GWpOD/eEQ97k66aiEVpNnapVj90/+R+SXTPYGHpYBBypfKJEQjLrvMZ7YXbKm21gXd4NcuxUTjiv1YtLng== + +whatwg-url-compat@~0.6.5: + version "0.6.5" + resolved "https://registry.yarnpkg.com/whatwg-url-compat/-/whatwg-url-compat-0.6.5.tgz#00898111af689bb097541cd5a45ca6c8798445bf" + integrity sha1-AImBEa9om7CXVBzVpFymyHmERb8= + dependencies: + tr46 "~0.0.1" + +whatwg-url@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/whatwg-url/-/whatwg-url-5.0.0.tgz#966454e8765462e37644d3626f6742ce8b70965d" + integrity sha1-lmRU6HZUYuN2RNNib2dCzotwll0= + dependencies: + tr46 "~0.0.3" + webidl-conversions "^3.0.0" + +which-boxed-primitive@^1.0.1, which-boxed-primitive@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/which-boxed-primitive/-/which-boxed-primitive-1.0.2.tgz#13757bc89b209b049fe5d86430e21cf40a89a8e6" + integrity sha512-bwZdv0AKLpplFY2KZRX6TvyuN7ojjr7lwkg6ml0roIy9YeuSr7JS372qlNW18UQYzgYK9ziGcerWqZOmEn9VNg== + dependencies: + is-bigint "^1.0.1" + is-boolean-object "^1.1.0" + is-number-object "^1.0.4" + is-string "^1.0.5" + is-symbol "^1.0.3" + +which-module@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/which-module/-/which-module-2.0.0.tgz#d9ef07dce77b9902b8a3a8fa4b31c3e3f7e6e87a" + integrity sha1-2e8H3Od7mQK4o6j6SzHD4/fm6Ho= + +which-typed-array@^1.1.2: + version "1.1.4" + resolved "https://registry.yarnpkg.com/which-typed-array/-/which-typed-array-1.1.4.tgz#8fcb7d3ee5adf2d771066fba7cf37e32fe8711ff" + integrity sha512-49E0SpUe90cjpoc7BOJwyPHRqSAd12c10Qm2amdEZrJPCY2NDxaW01zHITrem+rnETY3dwrbH3UUrUwagfCYDA== + dependencies: + available-typed-arrays "^1.0.2" + call-bind "^1.0.0" + es-abstract "^1.18.0-next.1" + foreach "^2.0.5" + function-bind "^1.1.1" + has-symbols "^1.0.1" + is-typed-array "^1.1.3" + +which@2.0.2, which@^2.0.0, which@^2.0.1: + version "2.0.2" + resolved "https://registry.yarnpkg.com/which/-/which-2.0.2.tgz#7c6a8dd0a636a0327e10b59c9286eee93f3f51b1" + 
integrity sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA== + dependencies: + isexe "^2.0.0" + +wide-align@1.1.3, wide-align@^1.1.0: + version "1.1.3" + resolved "https://registry.yarnpkg.com/wide-align/-/wide-align-1.1.3.tgz#ae074e6bdc0c14a431e804e624549c633b000457" + integrity sha512-QGkOQc8XL6Bt5PwnsExKBPuMKBxnGxWWW3fU55Xt4feHozMUhdUMaBCk290qpm/wG5u/RSKzwdAC4i51YigihA== + dependencies: + string-width "^1.0.2 || 2" + +window-size@^0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/window-size/-/window-size-0.2.0.tgz#b4315bb4214a3d7058ebeee892e13fa24d98b075" + integrity sha1-tDFbtCFKPXBY6+7okuE/ok2YsHU= + +word-wrap@~1.2.3: + version "1.2.3" + resolved "https://registry.yarnpkg.com/word-wrap/-/word-wrap-1.2.3.tgz#610636f6b1f703891bd34771ccb17fb93b47079c" + integrity sha512-Hz/mrNwitNRh/HUAtM/VT/5VH+ygD6DV7mYKZAtHOrbs8U7lvPS6xf7EJKMF0uW1KJCl0H701g3ZGus+muE5vQ== + +wordwrap@~0.0.2: + version "0.0.3" + resolved "https://registry.yarnpkg.com/wordwrap/-/wordwrap-0.0.3.tgz#a3d5da6cd5c0bc0008d37234bbaf1bed63059107" + integrity sha512-1tMA907+V4QmxV7dbRvb4/8MaRALK6q9Abid3ndMYnbyo8piisCmeONVqVSXqQA3KaP4SLt5b7ud6E2sqP8TFw== + +workerpool@6.0.0: + version "6.0.0" + resolved "https://registry.yarnpkg.com/workerpool/-/workerpool-6.0.0.tgz#85aad67fa1a2c8ef9386a1b43539900f61d03d58" + integrity sha512-fU2OcNA/GVAJLLyKUoHkAgIhKb0JoCpSjLC/G2vYKxUjVmQwGbRVeoPJ1a8U4pnVofz4AQV5Y/NEw8oKqxEBtA== + +wrap-ansi@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-2.1.0.tgz#d8fc3d284dd05794fe84973caecdd1cf824fdd85" + integrity sha1-2Pw9KE3QV5T+hJc8rs3Rz4JP3YU= + dependencies: + string-width "^1.0.1" + strip-ansi "^3.0.1" + +wrap-ansi@^5.1.0: + version "5.1.0" + resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-5.1.0.tgz#1fd1f67235d5b6d0fee781056001bfb694c03b09" + integrity sha512-QC1/iN/2/RPVJ5jYK8BGttj5z83LmSKmvbvrXPNCLZSEb32KKVDJDl/MOt2N01qU2H/FkzEa9PKto1BqDjtd7Q== + dependencies: + ansi-styles "^3.2.0" + string-width "^3.0.0" + strip-ansi "^5.0.0" + +wrap-ansi@^6.2.0: + version "6.2.0" + resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-6.2.0.tgz#e9393ba07102e6c91a3b221478f0257cd2856e53" + integrity sha512-r6lPcBGxZXlIcymEu7InxDMhdW0KDxpLgoFLcguasxCaJ/SOIZwINatK9KY/tf+ZrlywOKU0UDj3ATXUBfxJXA== + dependencies: + ansi-styles "^4.0.0" + string-width "^4.1.0" + strip-ansi "^6.0.0" + +wrappy@1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/wrappy/-/wrappy-1.0.2.tgz#b5243d8f3ec1aa35f1364605bc0d1036e30ab69f" + integrity sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ== + +write-file-atomic@^2.0.0: + version "2.4.3" + resolved "https://registry.yarnpkg.com/write-file-atomic/-/write-file-atomic-2.4.3.tgz#1fd2e9ae1df3e75b8d8c367443c692d4ca81f481" + integrity sha512-GaETH5wwsX+GcnzhPgKcKjJ6M2Cq3/iZp1WyY/X1CSqrW+jVNM9Y7D8EC2sM4ZG/V8wZlSniJnCKWPmBYAucRQ== + dependencies: + graceful-fs "^4.1.11" + imurmurhash "^0.1.4" + signal-exit "^3.0.2" + +write-stream@~0.4.3: + version "0.4.3" + resolved "https://registry.yarnpkg.com/write-stream/-/write-stream-0.4.3.tgz#83cc8c0347d0af6057a93862b4e3ae01de5c81c1" + integrity sha1-g8yMA0fQr2BXqThitOOuAd5cgcE= + dependencies: + readable-stream "~0.0.2" + +ws@7.1.0: + version "7.1.0" + resolved "https://registry.yarnpkg.com/ws/-/ws-7.1.0.tgz#0395646c6fcc3ac56abf61ce1a42039637a6bd98" + integrity sha512-Swie2C4fs7CkwlHu1glMePLYJJsWjzhl1vm3ZaLplD0h7OMkZyZ6kLTB/OagiU923bZrPFXuDTeEqaEN4NWG4g== + dependencies: + 
async-limiter "^1.0.0" + +ws@7.4.3: + version "7.4.3" + resolved "https://registry.yarnpkg.com/ws/-/ws-7.4.3.tgz#1f9643de34a543b8edb124bdcbc457ae55a6e5cd" + integrity sha512-hr6vCR76GsossIRsr8OLR9acVVm1jyfEWvhbNjtgPOrfvAlKzvyeg/P6r8RuDjRyrcQoPQT7K0DGEPc7Ae6jzA== + +ws@^3.0.0: + version "3.3.3" + resolved "https://registry.yarnpkg.com/ws/-/ws-3.3.3.tgz#f1cf84fe2d5e901ebce94efaece785f187a228f2" + integrity sha512-nnWLa/NwZSt4KQJu51MYlCcSQ5g7INpOrOMt4XV8j4dqTXdmlUmSHQ8/oLC069ckre0fRsgfvsKwbTdtKLCDkA== + dependencies: + async-limiter "~1.0.0" + safe-buffer "~5.1.0" + ultron "~1.1.0" + +ws@^5.1.1: + version "5.2.2" + resolved "https://registry.yarnpkg.com/ws/-/ws-5.2.2.tgz#dffef14866b8e8dc9133582514d1befaf96e980f" + integrity sha512-jaHFD6PFv6UgoIVda6qZllptQsMlDEJkTQcybzzXDYM1XO9Y8em691FGMPmM46WGyLU4z9KMgQN+qrux/nhlHA== + dependencies: + async-limiter "~1.0.0" + +"ws@^5.2.0 || ^6.0.0 || ^7.0.0": + version "7.5.5" + resolved "https://registry.yarnpkg.com/ws/-/ws-7.5.5.tgz#8b4bc4af518cfabd0473ae4f99144287b33eb881" + integrity sha512-BAkMFcAzl8as1G/hArkxOxq3G7pjUqQ3gzYbLL0/5zNkph70e+lCoxBGnm6AW1+/aiNeV4fnKqZ8m4GZewmH2w== + +ws@^7.4.5: + version "7.5.9" + resolved "https://registry.yarnpkg.com/ws/-/ws-7.5.9.tgz#54fa7db29f4c7cec68b1ddd3a89de099942bb591" + integrity sha512-F+P9Jil7UiSKSkppIiD94dN07AwvFixvLIj1Og1Rl9GGMuNipJnV9JzjD6XuqmAeiswGvUmNLjr5cFuXwNS77Q== + +xdg-basedir@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/xdg-basedir/-/xdg-basedir-3.0.0.tgz#496b2cc109eca8dbacfe2dc72b603c17c5870ad4" + integrity sha1-SWsswQnsqNus/i3HK2A8F8WHCtQ= + +xhr-request-promise@^0.1.2: + version "0.1.3" + resolved "https://registry.yarnpkg.com/xhr-request-promise/-/xhr-request-promise-0.1.3.tgz#2d5f4b16d8c6c893be97f1a62b0ed4cf3ca5f96c" + integrity sha512-YUBytBsuwgitWtdRzXDDkWAXzhdGB8bYm0sSzMPZT7Z2MBjMSTHFsyCT1yCRATY+XC69DUrQraRAEgcoCRaIPg== + dependencies: + xhr-request "^1.1.0" + +xhr-request@^1.0.1, xhr-request@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/xhr-request/-/xhr-request-1.1.0.tgz#f4a7c1868b9f198723444d82dcae317643f2e2ed" + integrity sha512-Y7qzEaR3FDtL3fP30k9wO/e+FBnBByZeybKOhASsGP30NIkRAAkKD/sCnLvgEfAIEC1rcmK7YG8f4oEnIrrWzA== + dependencies: + buffer-to-arraybuffer "^0.0.5" + object-assign "^4.1.1" + query-string "^5.0.1" + simple-get "^2.7.0" + timed-out "^4.0.1" + url-set-query "^1.0.0" + xhr "^2.0.4" + +xhr2-cookies@1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/xhr2-cookies/-/xhr2-cookies-1.1.0.tgz#7d77449d0999197f155cb73b23df72505ed89d48" + integrity sha1-fXdEnQmZGX8VXLc7I99yUF7YnUg= + dependencies: + cookiejar "^2.1.1" + +xhr@^2.0.4, xhr@^2.2.0, xhr@^2.3.3: + version "2.6.0" + resolved "https://registry.yarnpkg.com/xhr/-/xhr-2.6.0.tgz#b69d4395e792b4173d6b7df077f0fc5e4e2b249d" + integrity sha512-/eCGLb5rxjx5e3mF1A7s+pLlR6CGyqWN91fv1JgER5mVWg1MZmlhBvy9kjcsOdRk8RrIujotWyJamfyrp+WIcA== + dependencies: + global "~4.4.0" + is-function "^1.0.1" + parse-headers "^2.0.0" + xtend "^4.0.0" + +"xml-name-validator@>= 2.0.1 < 3.0.0": + version "2.0.1" + resolved "https://registry.yarnpkg.com/xml-name-validator/-/xml-name-validator-2.0.1.tgz#4d8b8f1eccd3419aa362061becef515e1e559635" + integrity sha1-TYuPHszTQZqjYgYb7O9RXh5VljU= + +xmlhttprequest@1.8.0: + version "1.8.0" + resolved "https://registry.yarnpkg.com/xmlhttprequest/-/xmlhttprequest-1.8.0.tgz#67fe075c5c24fef39f9d65f5f7b7fe75171968fc" + integrity sha1-Z/4HXFwk/vOfnWX197f+dRcZaPw= + +xss@^1.0.8: + version "1.0.10" + resolved 
"https://registry.yarnpkg.com/xss/-/xss-1.0.10.tgz#5cd63a9b147a755a14cb0455c7db8866120eb4d2" + integrity sha512-qmoqrRksmzqSKvgqzN0055UFWY7OKx1/9JWeRswwEVX9fCG5jcYRxa/A2DHcmZX6VJvjzHRQ2STeeVcQkrmLSw== + dependencies: + commander "^2.20.3" + cssfilter "0.0.10" + +"xtend@>=4.0.0 <4.1.0-0", xtend@^4.0.0, xtend@^4.0.1, xtend@^4.0.2, xtend@~4.0.0, xtend@~4.0.1: + version "4.0.2" + resolved "https://registry.yarnpkg.com/xtend/-/xtend-4.0.2.tgz#bb72779f5fa465186b1f438f674fa347fdb5db54" + integrity sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ== + +xtend@~2.1.1: + version "2.1.2" + resolved "https://registry.yarnpkg.com/xtend/-/xtend-2.1.2.tgz#6efecc2a4dad8e6962c4901b337ce7ba87b5d28b" + integrity sha1-bv7MKk2tjmlixJAbM3znuoe10os= + dependencies: + object-keys "~0.4.0" + +y18n@^3.2.1: + version "3.2.2" + resolved "https://registry.yarnpkg.com/y18n/-/y18n-3.2.2.tgz#85c901bd6470ce71fc4bb723ad209b70f7f28696" + integrity sha512-uGZHXkHnhF0XeeAPgnKfPv1bgKAYyVvmNL1xlKsPYZPaIHxGti2hHqvOCQv71XMsLxu1QjergkqogUnms5D3YQ== + +y18n@^4.0.0: + version "4.0.1" + resolved "https://registry.yarnpkg.com/y18n/-/y18n-4.0.1.tgz#8db2b83c31c5d75099bb890b23f3094891e247d4" + integrity sha512-wNcy4NvjMYL8gogWWYAO7ZFWFfHcbdbE57tZO8e4cbpj8tfUcwrwqSl3ad8HxpYWCdXcJUCeKKZS62Av1affwQ== + +yaeti@^0.0.6: + version "0.0.6" + resolved "https://registry.yarnpkg.com/yaeti/-/yaeti-0.0.6.tgz#f26f484d72684cf42bedfb76970aa1608fbf9577" + integrity sha1-8m9ITXJoTPQr7ft2lwqhYI+/lXc= + +yallist@^3.0.0, yallist@^3.0.2, yallist@^3.1.1: + version "3.1.1" + resolved "https://registry.yarnpkg.com/yallist/-/yallist-3.1.1.tgz#dbb7daf9bfd8bac9ab45ebf602b8cbad0d5d08fd" + integrity sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g== + +yallist@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/yallist/-/yallist-4.0.0.tgz#9bb92790d9c0effec63be73519e11a35019a3a72" + integrity sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A== + +yaml@1.9.2: + version "1.9.2" + resolved "https://registry.yarnpkg.com/yaml/-/yaml-1.9.2.tgz#f0cfa865f003ab707663e4f04b3956957ea564ed" + integrity sha512-HPT7cGGI0DuRcsO51qC1j9O16Dh1mZ2bnXwsi0jrSpsLz0WxOLSLXfkABVl6bZO629py3CU+OMJtpNHDLB97kg== + dependencies: + "@babel/runtime" "^7.9.2" + +yaml@^1.7.2: + version "1.10.2" + resolved "https://registry.yarnpkg.com/yaml/-/yaml-1.10.2.tgz#2301c5ffbf12b467de8da2333a459e29e7920e4b" + integrity sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg== + +yargs-parser@13.1.2, yargs-parser@^13.1.2: + version "13.1.2" + resolved "https://registry.yarnpkg.com/yargs-parser/-/yargs-parser-13.1.2.tgz#130f09702ebaeef2650d54ce6e3e5706f7a4fb38" + integrity sha512-3lbsNRf/j+A4QuSZfDRA7HRSfWrzO0YjqTJd5kjAq37Zep1CEgaYmrH9Q3GwPiB9cHyd1Y1UwggGhJGoxipbzg== + dependencies: + camelcase "^5.0.0" + decamelize "^1.2.0" + +yargs-parser@^15.0.1: + version "15.0.1" + resolved "https://registry.yarnpkg.com/yargs-parser/-/yargs-parser-15.0.1.tgz#54786af40b820dcb2fb8025b11b4d659d76323b3" + integrity sha512-0OAMV2mAZQrs3FkNpDQcBk1x5HXb8X4twADss4S0Iuk+2dGnLOE/fRHrsYm542GduMveyA77OF4wrNJuanRCWw== + dependencies: + camelcase "^5.0.0" + decamelize "^1.2.0" + +yargs-parser@^16.1.0: + version "16.1.0" + resolved "https://registry.yarnpkg.com/yargs-parser/-/yargs-parser-16.1.0.tgz#73747d53ae187e7b8dbe333f95714c76ea00ecf1" + integrity sha512-H/V41UNZQPkUMIT5h5hiwg4QKIY1RPvoBV4XcjUbRM8Bk2oKqqyZ0DIEbTFZB0XjbtSPG8SAa/0DxCQmiRgzKg== + 
dependencies: + camelcase "^5.0.0" + decamelize "^1.2.0" + +yargs-parser@^18.1.2: + version "18.1.3" + resolved "https://registry.yarnpkg.com/yargs-parser/-/yargs-parser-18.1.3.tgz#be68c4975c6b2abf469236b0c870362fab09a7b0" + integrity sha512-o50j0JeToy/4K6OZcaQmW6lyXXKhq7csREXcDwk2omFPJEwUNOVtJKvmDr9EI1fAJZUyZcRF7kxGBWmRXudrCQ== + dependencies: + camelcase "^5.0.0" + decamelize "^1.2.0" + +yargs-parser@^2.4.0: + version "2.4.1" + resolved "https://registry.yarnpkg.com/yargs-parser/-/yargs-parser-2.4.1.tgz#85568de3cf150ff49fa51825f03a8c880ddcc5c4" + integrity sha1-hVaN488VD/SfpRgl8DqMiA3cxcQ= + dependencies: + camelcase "^3.0.0" + lodash.assign "^4.0.6" + +yargs-unparser@1.6.1: + version "1.6.1" + resolved "https://registry.yarnpkg.com/yargs-unparser/-/yargs-unparser-1.6.1.tgz#bd4b0ee05b4c94d058929c32cb09e3fce71d3c5f" + integrity sha512-qZV14lK9MWsGCmcr7u5oXGH0dbGqZAIxTDrWXZDo5zUr6b6iUmelNKO6x6R1dQT24AH3LgRxJpr8meWy2unolA== + dependencies: + camelcase "^5.3.1" + decamelize "^1.2.0" + flat "^4.1.0" + is-plain-obj "^1.1.0" + yargs "^14.2.3" + +yargs@13.3.2: + version "13.3.2" + resolved "https://registry.yarnpkg.com/yargs/-/yargs-13.3.2.tgz#ad7ffefec1aa59565ac915f82dccb38a9c31a2dd" + integrity sha512-AX3Zw5iPruN5ie6xGRIDgqkT+ZhnRlZMLMHAs8tg7nRruy2Nb+i5o9bwghAogtM08q1dpr2LVoS8KSTMYpWXUw== + dependencies: + cliui "^5.0.0" + find-up "^3.0.0" + get-caller-file "^2.0.1" + require-directory "^2.1.1" + require-main-filename "^2.0.0" + set-blocking "^2.0.0" + string-width "^3.0.0" + which-module "^2.0.0" + y18n "^4.0.0" + yargs-parser "^13.1.2" + +yargs@4.6.0: + version "4.6.0" + resolved "https://registry.yarnpkg.com/yargs/-/yargs-4.6.0.tgz#cb4050c0159bfb6bb649c0f4af550526a84619dc" + integrity sha1-y0BQwBWb+2u2ScD0r1UFJqhGGdw= + dependencies: + camelcase "^2.0.1" + cliui "^3.2.0" + decamelize "^1.1.1" + lodash.assign "^4.0.3" + os-locale "^1.4.0" + pkg-conf "^1.1.2" + read-pkg-up "^1.0.1" + require-main-filename "^1.0.1" + string-width "^1.0.1" + window-size "^0.2.0" + y18n "^3.2.1" + yargs-parser "^2.4.0" + +yargs@^14.2.3: + version "14.2.3" + resolved "https://registry.yarnpkg.com/yargs/-/yargs-14.2.3.tgz#1a1c3edced1afb2a2fea33604bc6d1d8d688a414" + integrity sha512-ZbotRWhF+lkjijC/VhmOT9wSgyBQ7+zr13+YLkhfsSiTriYsMzkTUFP18pFhWwBeMa5gUc1MzbhrO6/VB7c9Xg== + dependencies: + cliui "^5.0.0" + decamelize "^1.2.0" + find-up "^3.0.0" + get-caller-file "^2.0.1" + require-directory "^2.1.1" + require-main-filename "^2.0.0" + set-blocking "^2.0.0" + string-width "^3.0.0" + which-module "^2.0.0" + y18n "^4.0.0" + yargs-parser "^15.0.1" + +yargs@^15.3.1: + version "15.4.1" + resolved "https://registry.yarnpkg.com/yargs/-/yargs-15.4.1.tgz#0d87a16de01aee9d8bec2bfbf74f67851730f4f8" + integrity sha512-aePbxDmcYW++PaqBsJ+HYUFwCdv4LVvdnhBy78E57PIor8/OVvhMrADFFEDh8DHDFRv/O9i3lPhsENjO7QX0+A== + dependencies: + cliui "^6.0.0" + decamelize "^1.2.0" + find-up "^4.1.0" + get-caller-file "^2.0.1" + require-directory "^2.1.1" + require-main-filename "^2.0.0" + set-blocking "^2.0.0" + string-width "^4.2.0" + which-module "^2.0.0" + y18n "^4.0.0" + yargs-parser "^18.1.2" + +yocto-queue@^0.1.0: + version "0.1.0" + resolved "https://registry.yarnpkg.com/yocto-queue/-/yocto-queue-0.1.0.tgz#0294eb3dee05028d31ee1a5fa2c556a6aaf10a1b" + integrity sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q== + +zen-observable-ts@^0.8.21: + version "0.8.21" + resolved "https://registry.yarnpkg.com/zen-observable-ts/-/zen-observable-ts-0.8.21.tgz#85d0031fbbde1eba3cd07d3ba90da241215f421d" + integrity 
sha512-Yj3yXweRc8LdRMrCC8nIc4kkjWecPAUVh0TI0OUrWXx6aX790vLcDlWca6I4vsyCGH3LpWxq0dJRcMOFoVqmeg== + dependencies: + tslib "^1.9.3" + zen-observable "^0.8.0" + +zen-observable@^0.8.0, zen-observable@^0.8.14: + version "0.8.15" + resolved "https://registry.yarnpkg.com/zen-observable/-/zen-observable-0.8.15.tgz#96415c512d8e3ffd920afd3889604e30b9eaac15" + integrity sha512-PQ2PC7R9rslx84ndNBZB/Dkv8V8fZEpk83RLgXtYd0fwUgEjseMn1Dgajh2x6S8QbZAFa9p2qVCEuYZNgve0dQ== diff --git a/tests/src/docker_utils.rs b/tests/src/docker_utils.rs new file mode 100644 index 00000000000..617dbf11c3e --- /dev/null +++ b/tests/src/docker_utils.rs @@ -0,0 +1,269 @@ +use bollard::image::CreateImageOptions; +use bollard::models::HostConfig; +use bollard::{container, Docker}; +use futures::stream::FuturesUnordered; +use futures::StreamExt; +use std::collections::HashMap; +use tokio::time::{sleep, Duration}; + +use crate::helpers::{contains_subslice, MappedPorts}; + +type DockerError = bollard::errors::Error; + +const POSTGRES_IMAGE: &str = "postgres:latest"; +const IPFS_IMAGE: &str = "ipfs/go-ipfs:v0.10.0"; +const GANACHE_IMAGE: &str = "trufflesuite/ganache-cli:latest"; + +pub async fn pull_service_images() { + let client = + Docker::connect_with_local_defaults().expect("Failed to connect to docker daemon."); + + let image_names = [POSTGRES_IMAGE, IPFS_IMAGE, GANACHE_IMAGE]; + + image_names + .iter() + .map(|image_name| pull_image(&client, image_name)) + // .await them in no specific order. + .collect::<FuturesUnordered<_>>() + .collect::<Vec<_>>() + .await; +} + +async fn pull_image(client: &Docker, image_name: &str) { + let options = Some(CreateImageOptions { + from_image: image_name, + ..Default::default() + }); + + let mut stream = client.create_image(options, None, None); + + while let Some(res) = stream.next().await { + if let Err(err) = res { + panic!("Error when pulling docker image `{}`: {}", image_name, err) + } + } +} + +pub async fn kill_and_remove(client: &Docker, container_name: &str) -> Result<(), DockerError> { + client.kill_container::<&str>(container_name, None).await?; + client.remove_container(container_name, None).await +} + +/// Represents all possible service containers to be spawned +#[derive(Debug, Copy, Clone)] +pub enum ServiceContainerKind { + Postgres, + Ipfs, + Ganache, +} + +impl ServiceContainerKind { + fn config(&self) -> container::Config<&'static str> { + let host_config = HostConfig { + publish_all_ports: Some(true), + ..Default::default() + }; + + match self { + Self::Postgres => container::Config { + image: Some(POSTGRES_IMAGE), + env: Some(vec![ + "POSTGRES_PASSWORD=password", + "POSTGRES_USER=postgres", + "POSTGRES_INITDB_ARGS=-E UTF8 --locale=C", + ]), + host_config: Some(host_config), + cmd: Some(vec![ + "postgres", + "-N", + "1000", + "-cshared_preload_libraries=pg_stat_statements", + ]), + ..Default::default() + }, + + Self::Ipfs => container::Config { + image: Some(IPFS_IMAGE), + host_config: Some(host_config), + ..Default::default() + }, + + Self::Ganache => container::Config { + image: Some(GANACHE_IMAGE), + cmd: Some(vec!["-d", "-l", "100000000000", "-g", "1"]), + host_config: Some(host_config), + ..Default::default() + }, + } + } + + pub fn name(&self) -> &str { + use ServiceContainerKind::*; + match self { + Postgres => "graph_node_integration_test_postgres", + Ipfs => "graph_node_integration_test_ipfs", + Ganache => "graph_node_integration_test_ganache", + } + } +} +
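Because every config above sets `publish_all_ports`, Docker binds each container port to an arbitrary free host port, so concurrent test runs do not collide on fixed ports. Tests recover the actual host port from the private-to-public table returned by `exposed_ports` further down. A minimal sketch, assuming `MappedPorts` carries the same `HashMap<u16, u16>` that `to_mapped_ports` builds at the end of this file; the helper itself is hypothetical:

use std::collections::HashMap;

// Hypothetical helper: resolve the host port that Docker bound to
// Postgres's container port 5432; None if that port was not published.
fn postgres_host_port(ports: &HashMap<u16, u16>) -> Option<u16> {
    ports.get(&5432).copied()
}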
+/// Handles the connection to the docker daemon and keeps track of the service running inside it. +pub struct ServiceContainer { + client: Docker, + container_name: String, +} + +impl ServiceContainer { + pub async fn start(service: ServiceContainerKind) -> Result<Self, DockerError> { + let client = + Docker::connect_with_local_defaults().expect("Failed to connect to docker daemon."); + let container_name = + format!("{}-{}", service.name(), uuid::Uuid::new_v4()).replace("-", "_"); + + let docker_test_client = Self { + container_name: container_name.clone(), + client, + }; + + docker_test_client + .client + .create_container( + Some(container::CreateContainerOptions { + name: container_name.clone(), + }), + service.config(), + ) + .await?; + + docker_test_client + .client + .start_container::<&'static str>(&container_name, None) + .await?; + + Ok(docker_test_client) + } + + pub fn container_name(&self) -> &str { + &self.container_name + } + + pub async fn stop(&self) -> Result<(), DockerError> { + kill_and_remove(&self.client, self.container_name()).await + } + + pub async fn exposed_ports(&self) -> Result<MappedPorts, DockerError> { + use bollard::models::ContainerSummaryInner; + + let results = { + let mut filters = HashMap::new(); + filters.insert("name".to_string(), vec![self.container_name().to_string()]); + let options = Some(container::ListContainersOptions { + filters, + limit: Some(1), + ..Default::default() + }); + self.client.list_containers(options).await? + }; + + let ports = match &results.as_slice() { + &[ContainerSummaryInner { + ports: Some(ports), .. + }] => ports, + unexpected_response => panic!( + "Received an unexpected response from the Docker API: {:#?}", + unexpected_response + ), + }; + + Ok(to_mapped_ports(ports.to_vec())) + } + + /// Halts execution until a trigger message is detected in the container logs, then + /// waits a further `hard_wait` after the message appears (a zero duration skips the + /// extra wait). + pub async fn wait_for_message( + &self, + trigger_message: &[u8], + hard_wait: Duration, + ) -> Result<&Self, DockerError> { + // listen to container logs + let mut stream = self.client.logs::<String>( + &self.container_name, + Some(container::LogsOptions { + follow: true, + stdout: true, + stderr: true, + ..Default::default() + }), + ); + + // halt execution until a message is received + loop { + match stream.next().await { + Some(Ok(log_line)) => { + if contains_subslice(log_line.to_string().as_bytes(), trigger_message) { + break; + } + } + Some(Err(error)) => return Err(error), + None => panic!("stream ended before expected message could be detected"), + } + } + + sleep(hard_wait).await; + Ok(self) + }
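Taken together, `start`, `wait_for_message`, `exposed_ports`, and `stop` give a test everything it needs to drive a service's lifecycle. A minimal sketch of that flow, assuming this module is in scope; the Postgres readiness line and the one-second hard wait are assumptions, not taken from this patch:

async fn start_postgres_for_test() -> Result<ServiceContainer, DockerError> {
    let pg = ServiceContainer::start(ServiceContainerKind::Postgres).await?;
    // Block until Postgres reports readiness (assumed log line), then give it
    // one extra second to start accepting connections.
    pg.wait_for_message(
        b"database system is ready to accept connections",
        Duration::from_secs(1),
    )
    .await?;
    let _ports = pg.exposed_ports().await?; // container port -> host port
    // ... run the test against the mapped port, then call pg.stop().
    Ok(pg)
}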
+ + /// Calls `docker exec` on the container to create a test database. + pub async fn create_postgres_database( + docker: &ServiceContainer, + db_name: &str, + ) -> Result<(), DockerError> { + use bollard::exec; + + // 1. Create Exec + let config = exec::CreateExecOptions { + cmd: Some(vec!["createdb", "-E", "UTF8", "--locale=C", &db_name]), + user: Some("postgres"), + attach_stdout: Some(true), + ..Default::default() + }; + + let message = docker + .client + .create_exec(docker.container_name(), config) + .await?; + + // 2. Start Exec + let mut stream = docker.client.start_exec(&message.id, None); + while let Some(_) = stream.next().await { /* consume stream */ } + + // 3. Inspect exec + let inspect = docker.client.inspect_exec(&message.id).await?; + match inspect.exit_code { + Some(0) => Ok(()), + code => panic!( + "failed to run 'createdb' command using docker exec (exit code: {:?})", + code + ), + } + } +} + +fn to_mapped_ports(input: Vec<bollard::models::Port>) -> MappedPorts { + let mut hashmap = HashMap::new(); + + for port in &input { + if let bollard::models::Port { + private_port, + public_port: Some(public_port), + .. + } = port + { + hashmap.insert(*private_port as u16, *public_port as u16); + } + } + if hashmap.is_empty() { + panic!("Container exposed no ports. Input={:?}", input) + } + hashmap +} diff --git a/tests/src/fixture/ethereum.rs b/tests/src/fixture/ethereum.rs index c44922a3de1..bbca9dfe297 100644 --- a/tests/src/fixture/ethereum.rs +++ b/tests/src/fixture/ethereum.rs @@ -1,14 +1,17 @@ use std::marker::PhantomData; -use std::sync::Arc; +use std::sync::{Arc, Mutex}; +use std::time::Duration; use super::{ - test_ptr, NoopAdapterSelector, NoopRuntimeAdapter, StaticStreamBuilder, Stores, NODE_ID, + test_ptr, MutexBlockStreamBuilder, NoopAdapterSelector, NoopRuntimeAdapter, + StaticBlockRefetcher, StaticStreamBuilder, Stores, TestChain, NODE_ID, }; -use graph::blockchain::BlockPtr; +use graph::blockchain::{BlockPtr, TriggersAdapterSelector}; use graph::cheap_clone::CheapClone; -use graph::firehose::{FirehoseEndpoint, FirehoseEndpoints}; +use graph::firehose::{FirehoseEndpoint, FirehoseEndpoints, SubgraphLimit}; use graph::prelude::ethabi::ethereum_types::H256; -use graph::prelude::{LightEthereumBlock, LoggerFactory, NodeId}; +use graph::prelude::web3::types::{Address, Log, Transaction, H160}; +use graph::prelude::{ethabi, tiny_keccak, LightEthereumBlock, LoggerFactory, NodeId}; use graph::{blockchain::block_stream::BlockWithTriggers, prelude::ethabi::ethereum_types::U64}; use graph_chain_ethereum::network::EthereumNetworkAdapters; use graph_chain_ethereum::{ @@ -18,11 +21,19 @@ use graph_chain_ethereum::{ use graph_chain_ethereum::{Chain, ENV_VARS}; use graph_mock::MockMetricsRegistry; -pub async fn chain(blocks: Vec<BlockWithTriggers<Chain>>, stores: &Stores) -> Chain { +pub async fn chain( + blocks: Vec<BlockWithTriggers<Chain>>, + stores: &Stores, + triggers_adapter: Option<Arc<dyn TriggersAdapterSelector<Chain>>>, +) -> TestChain { + let triggers_adapter = triggers_adapter.unwrap_or(Arc::new(NoopAdapterSelector { + triggers_in_block_sleep: Duration::ZERO, + x: PhantomData, + })); let logger = graph::log::logger(true); - let logger_factory = LoggerFactory::new(logger.cheap_clone(), None); - let node_id = NodeId::new(NODE_ID).unwrap(); let mock_registry = Arc::new(MockMetricsRegistry::new()); + let logger_factory = LoggerFactory::new(logger.cheap_clone(), None, mock_registry.clone()); + let node_id = NodeId::new(NODE_ID).unwrap(); let chain_store = stores.chain_store.cheap_clone(); @@ -34,11 +45,14 @@ None, true, false, - 0, + SubgraphLimit::Unlimited, ))] .into(); - Chain::new( + let static_block_stream = Arc::new(StaticStreamBuilder { chain: blocks }); + let block_stream_builder = Arc::new(MutexBlockStreamBuilder(Mutex::new(static_block_stream))); + + let chain = Chain::new( logger_factory.clone(), stores.network_name.clone(), node_id, @@ -46,15 +60,24 @@ chain_store.cheap_clone(), chain_store, firehose_endpoints, - EthereumNetworkAdapters { adapters: vec![] }, + EthereumNetworkAdapters { + adapters: vec![], + call_only_adapters: vec![], + }, stores.chain_head_listener.cheap_clone(), - Arc::new(StaticStreamBuilder { chain: blocks }), -
Arc::new(NoopAdapterSelector { x: PhantomData }), + block_stream_builder.clone(), + Arc::new(StaticBlockRefetcher { x: PhantomData }), + triggers_adapter, Arc::new(NoopRuntimeAdapter { x: PhantomData }), ENV_VARS.reorg_threshold, // We assume the tested chain is always ingestible for now true, - ) + ); + + TestChain { + chain: Arc::new(chain), + block_stream_builder, + } } pub fn genesis() -> BlockWithTriggers<Chain> { @@ -76,13 +99,44 @@ pub fn empty_block( assert!(ptr != parent_ptr); assert!(ptr.number > parent_ptr.number); + // A 0x000.. transaction is used so `push_test_log` can use it + let transactions = vec![Transaction { + hash: H256::zero(), + block_hash: Some(H256::from_slice(ptr.hash.as_slice().into())), + block_number: Some(ptr.number.into()), + transaction_index: Some(0.into()), + from: Some(H160::zero()), + to: Some(H160::zero()), + ..Default::default() + }]; + BlockWithTriggers::<Chain> { block: BlockFinality::Final(Arc::new(LightEthereumBlock { hash: Some(H256::from_slice(ptr.hash.as_slice())), number: Some(U64::from(ptr.number)), parent_hash: H256::from_slice(parent_ptr.hash.as_slice()), + transactions, ..Default::default() })), trigger_data: vec![EthereumTrigger::Block(ptr, EthereumBlockTriggerType::Every)], } } + +pub fn push_test_log(block: &mut BlockWithTriggers<Chain>, payload: impl Into<String>) { + block.trigger_data.push(EthereumTrigger::Log( + Arc::new(Log { + address: Address::zero(), + topics: vec![tiny_keccak::keccak256(b"TestEvent(string)").into()], + data: ethabi::encode(&[ethabi::Token::String(payload.into())]).into(), + block_hash: Some(H256::from_slice(block.ptr().hash.as_slice())), + block_number: Some(block.ptr().number.into()), + transaction_hash: Some(H256::from_low_u64_be(0).into()), + transaction_index: Some(0.into()), + log_index: Some(0.into()), + transaction_log_index: Some(0.into()), + log_type: None, + removed: None, + }), + None, + )) +} diff --git a/tests/src/fixture.rs b/tests/src/fixture/mod.rs similarity index 56% rename from tests/src/fixture.rs rename to tests/src/fixture/mod.rs index 2a5f2b7562f..52405453952 100644 --- a/tests/src/fixture.rs +++ b/tests/src/fixture/mod.rs @@ -1,16 +1,15 @@ pub mod ethereum; use std::marker::PhantomData; -use std::process::Command; use std::sync::Mutex; use std::time::Duration; -use crate::helpers::run_cmd; use anyhow::Error; use async_stream::stream; use futures::{Stream, StreamExt}; use graph::blockchain::block_stream::{ - BlockStream, BlockStreamBuilder, BlockStreamEvent, BlockWithTriggers, FirehoseCursor, + BlockRefetcher, BlockStream, BlockStreamBuilder, BlockStreamEvent, BlockWithTriggers, + FirehoseCursor, }; use graph::blockchain::{ Block, BlockHash, BlockPtr, Blockchain, BlockchainMap, ChainIdentifier, RuntimeAdapter, @@ -20,25 +19,30 @@ use graph::cheap_clone::CheapClone; use graph::components::store::{BlockStore, DeploymentLocator}; use graph::data::graphql::effort::LoadManager; use graph::data::query::{Query, QueryTarget}; -use graph::env::ENV_VARS; +use graph::data::subgraph::schema::{SubgraphError, SubgraphHealth}; +use graph::env::EnvVars; use graph::ipfs_client::IpfsClient; use graph::prelude::ethabi::ethereum_types::H256; +use graph::prelude::serde_json::{self, json}; use graph::prelude::{ - async_trait, r, ApiVersion, BlockNumber, DeploymentHash, GraphQlRunner as _, LoggerFactory, - MetricsRegistry, NodeId, QueryError, SubgraphAssignmentProvider, SubgraphName, - SubgraphRegistrar, SubgraphStore as _, SubgraphVersionSwitchingMode, + async_trait, r, ApiVersion, BigInt, BlockNumber, DeploymentHash, GraphQlRunner as _,
+ LoggerFactory, MetricsRegistry, NodeId, QueryError, SubgraphAssignmentProvider, SubgraphName, + SubgraphRegistrar, SubgraphStore as _, SubgraphVersionSwitchingMode, TriggerProcessor, }; -use graph_core::polling_monitor::ipfs_service::IpfsService; +use graph::slog::crit; +use graph_core::polling_monitor::ipfs_service; use graph_core::{ LinkResolver, SubgraphAssignmentProvider as IpfsSubgraphAssignmentProvider, - SubgraphInstanceManager, SubgraphRegistrar as IpfsSubgraphRegistrar, + SubgraphInstanceManager, SubgraphRegistrar as IpfsSubgraphRegistrar, SubgraphTriggerProcessor, }; -use graph_graphql::prelude::GraphQlRunner; use graph_mock::MockMetricsRegistry; use graph_node::manager::PanicSubscriptionManager; use graph_node::{config::Config, store_builder::StoreBuilder}; +use graph_runtime_wasm::RuntimeHostBuilder; +use graph_server_index_node::IndexNodeService; use graph_store_postgres::{ChainHeadUpdateListener, ChainStore, Store, SubgraphStore}; -use slog::{crit, info, Logger}; +use serde::Deserialize; +use slog::{info, o, Discard, Logger}; use std::env::VarError; use std::pin::Pin; use std::sync::Arc; @@ -47,57 +51,37 @@ use tokio::fs::read_to_string; const NODE_ID: &str = "default"; -pub async fn build_subgraph(dir: &str) -> DeploymentHash { - build_subgraph_with_yarn_cmd(dir, "deploy:test").await +pub fn test_ptr(n: BlockNumber) -> BlockPtr { + test_ptr_reorged(n, 0) } -pub async fn build_subgraph_with_yarn_cmd(dir: &str, yarn_cmd: &str) -> DeploymentHash { - // Test that IPFS is up. - IpfsClient::localhost() - .test() - .await - .expect("Could not connect to IPFS, make sure it's running at port 5001"); - - // Make sure dependencies are present. - run_cmd( - Command::new("yarn") - .arg("install") - .arg("--mutex") - .arg("file:.yarn-mutex") - .current_dir("./integration-tests"), - ); +// Set n as the low bits and `reorg_n` as the high bits of the hash. +pub fn test_ptr_reorged(n: BlockNumber, reorg_n: u32) -> BlockPtr { + let mut hash = H256::from_low_u64_be(n as u64); + hash[0..4].copy_from_slice(&reorg_n.to_be_bytes()); + BlockPtr { + hash: hash.into(), + number: n, + } +} - // Run codegen. - run_cmd(Command::new("yarn").arg("codegen").current_dir(&dir)); - - // Run `deploy` for the side effect of uploading to IPFS, the graph node url - // is fake and the actual deploy call is meant to fail. - let deploy_output = run_cmd( - Command::new("yarn") - .arg(yarn_cmd) - .env("IPFS_URI", "http://127.0.0.1:5001") - .env("GRAPH_NODE_ADMIN_URI", "http://localhost:0") - .current_dir(dir), - ); +type GraphQlRunner = graph_graphql::prelude::GraphQlRunner<Store, PanicSubscriptionManager>; - // Hack to extract deployment id from `graph deploy` output.
- const ID_PREFIX: &str = "Build completed: "; - let mut line = deploy_output - .lines() - .find(|line| line.contains(ID_PREFIX)) - .expect("found no matching line"); - if !line.starts_with(ID_PREFIX) { - line = &line[5..line.len() - 5]; // workaround for colored output - } - DeploymentHash::new(line.trim_start_matches(ID_PREFIX)).unwrap() +pub struct TestChain { + pub chain: Arc, + pub block_stream_builder: Arc>, } -pub fn test_ptr(n: BlockNumber) -> BlockPtr { - BlockPtr { - hash: H256::from_low_u64_be(n as u64).into(), - number: n, +impl TestChain { + pub fn set_block_stream(&self, blocks: Vec>) + where + C::TriggerData: Clone, + { + let static_block_stream = Arc::new(StaticStreamBuilder { chain: blocks }); + *self.block_stream_builder.0.lock().unwrap() = static_block_stream; } } + pub struct TestContext { pub logger: Logger, pub provider: Arc< @@ -108,38 +92,160 @@ pub struct TestContext { pub store: Arc, pub deployment: DeploymentLocator, pub subgraph_name: SubgraphName, - graphql_runner: Arc>, + pub instance_manager: SubgraphInstanceManager, + pub link_resolver: Arc, + pub env_vars: Arc, + pub ipfs: IpfsClient, + graphql_runner: Arc, + indexing_status_service: Arc>, +} + +#[derive(Deserialize)] +pub struct IndexingStatusBlock { + pub number: BigInt, +} + +#[derive(Deserialize)] +pub struct IndexingStatusError { + pub deterministic: bool, + pub block: IndexingStatusBlock, +} + +#[derive(Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct IndexingStatus { + pub health: SubgraphHealth, + pub entity_count: BigInt, + pub fatal_error: Option, +} + +#[derive(Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct IndexingStatusForCurrentVersion { + pub indexing_status_for_current_version: IndexingStatus, } impl TestContext { + pub async fn runner( + &self, + stop_block: BlockPtr, + ) -> graph_core::SubgraphRunner< + graph_chain_ethereum::Chain, + RuntimeHostBuilder, + > { + let logger = self.logger.cheap_clone(); + let deployment = self.deployment.cheap_clone(); + + // Stolen from the IPFS provider, there's prolly a nicer way to re-use it + let file_bytes = self + .link_resolver + .cat(&logger, &deployment.hash.to_ipfs_link()) + .await + .unwrap(); + + let raw: serde_yaml::Mapping = serde_yaml::from_slice(&file_bytes).unwrap(); + let tp: Box> = Box::new(SubgraphTriggerProcessor {}); + + self.instance_manager + .build_subgraph_runner( + logger, + self.env_vars.cheap_clone(), + deployment, + raw, + Some(stop_block.block_number()), + tp, + ) + .await + .unwrap() + } + pub async fn start_and_sync_to(&self, stop_block: BlockPtr) { + // In case the subgraph has been previously started. + self.provider.stop(self.deployment.clone()).await.unwrap(); + self.provider .start(self.deployment.clone(), Some(stop_block.number)) .await .expect("unable to start subgraph"); - wait_for_sync(&self.logger, &self.store, &self.deployment.hash, stop_block) + wait_for_sync( + &self.logger, + &self.store, + &self.deployment.clone(), + stop_block, + ) + .await + .unwrap(); + } + + pub async fn start_and_sync_to_error(&self, stop_block: BlockPtr) -> SubgraphError { + // In case the subgraph has been previously started. 
+ self.provider.stop(self.deployment.clone()).await.unwrap(); + + self.provider + .start(self.deployment.clone(), None) .await - .unwrap(); + .expect("unable to start subgraph"); + + wait_for_sync( + &self.logger, + &self.store, + &self.deployment.clone(), + stop_block, + ) + .await + .unwrap_err() } pub async fn query(&self, query: &str) -> Result<Option<r::Value>, Vec<QueryError>> { let target = QueryTarget::Deployment(self.deployment.hash.clone(), ApiVersion::default()); + let query = Query::new( + graphql_parser::parse_query(query).unwrap().into_static(), + None, + false, + ); + let query_res = self.graphql_runner.clone().run_query(query, target).await; + query_res.first().unwrap().duplicate().to_result() + } - self.graphql_runner - .clone() - .run_query( - Query::new( - graphql_parser::parse_query(query).unwrap().into_static(), - None, - ), - target, - ) - .await + pub async fn indexing_status(&self) -> IndexingStatus { + let query = format!( + r#" + {{ + indexingStatusForCurrentVersion(subgraphName: "{}") {{ + health + entityCount + fatalError {{ + deterministic + block {{ + number + }} + }} + }} + }} + "#, + &self.subgraph_name + ); + let body = json!({ "query": query }).to_string(); + let req = hyper::Request::new(body.into()); + let res = self.indexing_status_service.handle_graphql_query(req).await; + let value = res + .unwrap() .first() .unwrap() .duplicate() .to_result() + .unwrap() + .unwrap(); + let query_res: IndexingStatusForCurrentVersion = + serde_json::from_str(&serde_json::to_string(&value).unwrap()).unwrap(); + query_res.indexing_status_for_current_version + } + + pub fn rewind(&self, block_ptr_to: BlockPtr) { + self.store + .rewind(self.deployment.hash.clone(), block_ptr_to) + .unwrap() } } @@ -214,12 +320,18 @@ pub async fn setup( subgraph_name: SubgraphName, hash: &DeploymentHash, stores: &Stores, - chain: Arc<Chain>, + chain: &TestChain<Chain>, graft_block: Option<BlockPtr>, + env_vars: Option<EnvVars>, ) -> TestContext { + let env_vars = Arc::new(match env_vars { + Some(ev) => ev, + None => EnvVars::from_env().unwrap(), + }); + let logger = graph::log::logger(true); - let logger_factory = LoggerFactory::new(logger.clone(), None); let mock_registry: Arc<dyn MetricsRegistry> = Arc::new(MockMetricsRegistry::new()); + let logger_factory = LoggerFactory::new(logger.clone(), None, mock_registry.clone()); let node_id = NodeId::new(NODE_ID).unwrap(); // Make sure we're starting from a clean state.
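The `indexing_status` helper above round-trips the index-node GraphQL response through serde_json before deserializing it into the `IndexingStatus*` structs. A minimal, self-contained sketch of that deserialization shape, assuming a hypothetical payload and simplifying the `BigInt` fields to `String` so it runs on plain serde/serde_json:

use serde::Deserialize;

#[derive(Deserialize)]
struct Block {
    number: String, // `BigInt` in the real fixture; a plain string here for simplicity
}

#[derive(Deserialize)]
struct FatalError {
    deterministic: bool,
    block: Block,
}

#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct Status {
    health: String, // `SubgraphHealth` in the real fixture
    entity_count: String,
    fatal_error: Option<FatalError>,
}

#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct ForCurrentVersion {
    indexing_status_for_current_version: Status,
}

fn main() {
    // Hypothetical response in the shape the GraphQL query above produces.
    let raw = r#"{
        "indexingStatusForCurrentVersion": {
            "health": "failed",
            "entityCount": "42",
            "fatalError": { "deterministic": true, "block": { "number": "7" } }
        }
    }"#;
    let parsed: ForCurrentVersion = serde_json::from_str(raw).unwrap();
    let status = parsed.indexing_status_for_current_version;
    assert_eq!(status.entity_count, "42");
    assert!(status.fatal_error.unwrap().deterministic);
}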
@@ -227,25 +339,26 @@ pub async fn setup( cleanup(&subgraph_store, &subgraph_name, hash).unwrap(); let mut blockchain_map = BlockchainMap::new(); - blockchain_map.insert(stores.network_name.clone(), chain); + blockchain_map.insert(stores.network_name.clone(), chain.chain.clone()); - let static_filters = ENV_VARS.experimental_static_filters; + let static_filters = env_vars.experimental_static_filters; let ipfs = IpfsClient::localhost(); let link_resolver = Arc::new(LinkResolver::new( vec![ipfs.cheap_clone()], Default::default(), )); - let ipfs_service = IpfsService::new( - ipfs, - ENV_VARS.mappings.max_ipfs_file_bytes as u64, - ENV_VARS.mappings.ipfs_timeout, - ENV_VARS.mappings.max_ipfs_concurrent_requests, + let ipfs_service = ipfs_service( + ipfs.cheap_clone(), + env_vars.mappings.max_ipfs_file_bytes as u64, + env_vars.mappings.ipfs_timeout, + env_vars.mappings.ipfs_request_limit, ); let blockchain_map = Arc::new(blockchain_map); let subgraph_instance_manager = SubgraphInstanceManager::new( &logger_factory, + env_vars.cheap_clone(), subgraph_store.clone(), blockchain_map.clone(), mock_registry.clone(), @@ -265,11 +378,19 @@ pub async fn setup( mock_registry.clone(), )); + let indexing_status_service = Arc::new(IndexNodeService::new( + logger.cheap_clone(), + blockchain_map.cheap_clone(), + graphql_runner.cheap_clone(), + stores.network_store.cheap_clone(), + link_resolver.cheap_clone(), + )); + // Create IPFS-based subgraph provider let subgraph_provider = Arc::new(IpfsSubgraphAssignmentProvider::new( &logger_factory, link_resolver.cheap_clone(), - subgraph_instance_manager, + subgraph_instance_manager.clone(), )); let panicking_subscription_manager = Arc::new(PanicSubscriptionManager {}); @@ -308,6 +429,11 @@ pub async fn setup( deployment, subgraph_name, graphql_runner, + instance_manager: subgraph_instance_manager, + link_resolver, + env_vars, + indexing_status_service, + ipfs, } } @@ -327,14 +453,14 @@ pub fn cleanup( pub async fn wait_for_sync( logger: &Logger, store: &SubgraphStore, - hash: &DeploymentHash, + deployment: &DeploymentLocator, stop_block: BlockPtr, -) -> Result<(), Error> { +) -> Result<(), SubgraphError> { let mut err_count = 0; while err_count < 10 { tokio::time::sleep(Duration::from_millis(1000)).await; - let block_ptr = match store.least_block_ptr(&hash).await { + let block_ptr = match store.least_block_ptr(&deployment.hash).await { Ok(Some(ptr)) => ptr, res => { info!(&logger, "{:?}", res); @@ -343,20 +469,89 @@ pub async fn wait_for_sync( } }; - if block_ptr == stop_block { - break; + let status = store.status_for_id(deployment.id); + + if let Some(fatal_error) = status.fatal_error { + if fatal_error.block_ptr.as_ref().unwrap() == &stop_block { + return Err(fatal_error); + } } - if !store.is_healthy(&hash).await.unwrap() { - return Err(anyhow::anyhow!("subgraph failed unexpectedly")); + if block_ptr == stop_block { + info!(logger, "TEST: reached stop block"); + break; } } Ok(()) } +struct StaticBlockRefetcher<C> { + x: PhantomData<C>, +} + +#[async_trait] +impl<C: Blockchain> BlockRefetcher<C> for StaticBlockRefetcher<C> { + fn required(&self, _chain: &C) -> bool { + false + } + + async fn get_block( + &self, + _chain: &C, + _logger: &Logger, + _cursor: FirehoseCursor, + ) -> Result<C::Block, Error> { + unimplemented!("this block refetcher always returns false, get_block shouldn't be called") + } +} + +pub struct MutexBlockStreamBuilder<C: Blockchain>(pub Mutex<Arc<dyn BlockStreamBuilder<C>>>); + +#[async_trait] +impl<C: Blockchain> BlockStreamBuilder<C> for MutexBlockStreamBuilder<C> { + async fn build_firehose( + &self, + chain: &C, + deployment: DeploymentLocator, + 
block_cursor: FirehoseCursor, + start_blocks: Vec<BlockNumber>, + subgraph_current_block: Option<BlockPtr>, + filter: Arc<<C as Blockchain>::TriggerFilter>, + unified_api_version: graph::data::subgraph::UnifiedMappingApiVersion, + ) -> anyhow::Result<Box<dyn BlockStream<C>>> { + let builder = self.0.lock().unwrap().clone(); + + builder + .build_firehose( + chain, + deployment, + block_cursor, + start_blocks, + subgraph_current_block, + filter, + unified_api_version, + ) + .await + } + + async fn build_polling( + &self, + _chain: Arc<C>, + _deployment: DeploymentLocator, + _start_blocks: Vec<BlockNumber>, + _subgraph_current_block: Option<BlockPtr>, + _filter: Arc<<C as Blockchain>::TriggerFilter>, + _unified_api_version: graph::data::subgraph::UnifiedMappingApiVersion, + ) -> anyhow::Result<Box<dyn BlockStream<C>>> { + unimplemented!("only firehose mode should be used for tests") + } +} + /// `chain` is the sequence of chain heads to be processed. If the next block to be processed in the /// chain is not a descendant of the previous one, reorgs will be emitted until it is. +/// +/// If the stream is reset, emitted reorged blocks will not be emitted again. /// See also: static-stream-builder struct StaticStreamBuilder<C: Blockchain> { chain: Vec<BlockWithTriggers<C>>, } @@ -465,8 +660,9 @@ impl<C: Blockchain> RuntimeAdapter<C> for NoopRuntimeAdapter<C> { } } -struct NoopAdapterSelector<C> { - x: PhantomData<C>, +pub struct NoopAdapterSelector<C> { + pub x: PhantomData<C>, + pub triggers_in_block_sleep: Duration, } impl<C: Blockchain> TriggersAdapterSelector<C> for NoopAdapterSelector<C> { @@ -476,16 +672,50 @@ impl<C: Blockchain> TriggersAdapterSelector<C> for NoopAdapterSelector<C> { _capabilities: &<C as Blockchain>::NodeCapabilities, _unified_api_version: graph::data::subgraph::UnifiedMappingApiVersion, ) -> Result<Arc<dyn TriggersAdapter<C>>, Error> { - Ok(Arc::new(NoopTriggersAdapter { x: PhantomData })) + // Return no triggers on data source reprocessing. + let triggers_in_block = Arc::new(|block| { + let logger = Logger::root(Discard, o!()); + Ok(BlockWithTriggers::new(block, Vec::new(), &logger)) + }); + Ok(Arc::new(MockTriggersAdapter { + x: PhantomData, + triggers_in_block, + triggers_in_block_sleep: self.triggers_in_block_sleep, + })) + } +} + +pub struct MockAdapterSelector<C: Blockchain> { + pub x: PhantomData<C>, + pub triggers_in_block_sleep: Duration, + pub triggers_in_block: + Arc<dyn Fn(<C as Blockchain>::Block) -> Result<BlockWithTriggers<C>, Error> + Sync + Send>, +} + +impl<C: Blockchain> TriggersAdapterSelector<C> for MockAdapterSelector<C> { + fn triggers_adapter( + &self, + _loc: &DeploymentLocator, + _capabilities: &<C as Blockchain>::NodeCapabilities, + _unified_api_version: graph::data::subgraph::UnifiedMappingApiVersion, + ) -> Result<Arc<dyn TriggersAdapter<C>>, Error> { + Ok(Arc::new(MockTriggersAdapter { + x: PhantomData, + triggers_in_block: self.triggers_in_block.clone(), + triggers_in_block_sleep: self.triggers_in_block_sleep, + })) } } -struct NoopTriggersAdapter<C> { +struct MockTriggersAdapter<C: Blockchain> { x: PhantomData<C>, + triggers_in_block_sleep: Duration, + triggers_in_block: + Arc<dyn Fn(<C as Blockchain>::Block) -> Result<BlockWithTriggers<C>, Error> + Sync + Send>, } #[async_trait] -impl<C: Blockchain> TriggersAdapter<C> for NoopTriggersAdapter<C> { +impl<C: Blockchain> TriggersAdapter<C> for MockTriggersAdapter<C> { async fn ancestor_block( &self, _ptr: BlockPtr, @@ -509,8 +739,9 @@ impl<C: Blockchain> TriggersAdapter<C> for MockTriggersAdapter<C> { block: <C as Blockchain>::Block, _filter: &<C as Blockchain>::TriggerFilter, ) -> Result<BlockWithTriggers<C>, Error> { - // Return no triggers on data source reprocessing.
- Ok(BlockWithTriggers::new(block, Vec::new())) + tokio::time::sleep(self.triggers_in_block_sleep).await; + + (self.triggers_in_block)(block) } async fn is_on_main_chain(&self, _ptr: BlockPtr) -> Result<bool, Error> { diff --git a/tests/src/helpers.rs b/tests/src/helpers.rs index 02597b183d4..21d520c00d2 100644 --- a/tests/src/helpers.rs +++ b/tests/src/helpers.rs @@ -1,26 +1,18 @@ use std::collections::HashMap; use std::ffi::OsStr; -use std::io::{self, BufRead}; +use std::net::TcpListener; use std::path::Path; use std::process::Command; -use std::sync::atomic::{AtomicU16, Ordering}; +use std::sync::atomic::AtomicU16; use anyhow::Context; -/// A counter for uniquely naming Ganache containers -static GANACHE_CONTAINER_COUNT: AtomicU16 = AtomicU16::new(0); -/// A counter for uniquely naming Postgres databases -static POSTGRES_DATABASE_COUNT: AtomicU16 = AtomicU16::new(0); -/// A counter for uniquely assigning ports. -static PORT_NUMBER_COUNTER: AtomicU16 = AtomicU16::new(10_000); - const POSTGRESQL_DEFAULT_PORT: u16 = 5432; const GANACHE_DEFAULT_PORT: u16 = 8545; const IPFS_DEFAULT_PORT: u16 = 5001; /// Maps `Service => Host` exposed ports. -#[derive(Debug)] -pub struct MappedPorts(pub HashMap<u16, u16>); +pub type MappedPorts = HashMap<u16, u16>; /// Strip parent directories from filenames pub fn basename(path: &impl AsRef<Path>) -> String { @@ -31,45 +23,44 @@ pub fn basename(path: &impl AsRef<Path>) -> String { .expect("failed to infer basename for path.") } -/// Fetches a unique number for naming Ganache containers -pub fn get_unique_ganache_counter() -> u16 { - increase_atomic_counter(&GANACHE_CONTAINER_COUNT) -} -/// Fetches a unique number for naming Postgres databases -pub fn get_unique_postgres_counter() -> u16 { - increase_atomic_counter(&POSTGRES_DATABASE_COUNT) -} -/// Fetches a unique port number -pub fn get_unique_port_number() -> u16 { - increase_atomic_counter(&PORT_NUMBER_COUNTER) +/// Parses stdout bytes into a prefixed String +pub fn pretty_output(blob: &[u8], prefix: &str) -> String { + blob.split(|b| *b == b'\n') + .map(String::from_utf8_lossy) + .map(|line| format!("{}{}", prefix, line)) + .collect::<Vec<_>>() + .join("\n") } -fn increase_atomic_counter(counter: &'static AtomicU16) -> u16 { - let old_count = counter.fetch_add(1, Ordering::SeqCst); - old_count + 1 -} +/// Finds and returns a free port. Ports are never *guaranteed* to be free because of +/// [TOCTOU](https://en.wikipedia.org/wiki/Time-of-check_to_time-of-use) race +/// conditions. +/// +/// This function guards against conflicts coming from other callers, so you +/// will only get port conflicts from external resources. +fn get_free_port() -> u16 { + // We start cycling through ports at 10000, which is high enough in the port + // space to cause few conflicts. + const RANGE_START: u16 = 10_000; + static COUNTER: AtomicU16 = AtomicU16::new(RANGE_START); -/// Parses stdio bytes into a prefixed String -pub fn pretty_output(stdio: &[u8], prefix: &str) -> String { - let mut cursor = io::Cursor::new(stdio); - let mut buf = vec![]; - let mut string = String::new(); loop { - buf.clear(); - let bytes_read = cursor - .read_until(b'\n', &mut buf) - .expect("failed to read from stdio."); - if bytes_read == 0 { - break; + let ordering = std::sync::atomic::Ordering::SeqCst; + let port = COUNTER.fetch_add(1, ordering); + if port < RANGE_START { + // We've wrapped around, start over.
+ COUNTER.store(RANGE_START, ordering); + continue; + } + + let bind = TcpListener::bind(("127.0.0.1", port)); + if bind.is_ok() { + return port; } - let as_string = String::from_utf8_lossy(&buf); - string.push_str(&prefix); - string.push_str(&as_string); // will contain a newline } - string } -#[derive(Debug)] +#[derive(Debug, Copy, Clone)] pub struct GraphNodePorts { pub http: u16, pub index: u16, @@ -77,31 +68,23 @@ pub struct GraphNodePorts { pub admin: u16, pub metrics: u16, } + impl GraphNodePorts { - /// Returns five available port numbers, using dynamic port ranges - pub fn get_ports() -> GraphNodePorts { - let mut ports = [0u16; 5]; - for port in ports.iter_mut() { - let min = get_unique_port_number(); - let max = min + 1_000; - let free_port_in_range = port_check::free_local_port_in_range(min, max) - .expect("failed to obtain a free port in range"); - *port = free_port_in_range; - } - GraphNodePorts { - http: ports[0], - index: ports[1], - ws: ports[2], - admin: ports[3], - metrics: ports[4], + /// Populates all values with random free ports. + pub fn random_free() -> GraphNodePorts { + Self { + http: get_free_port(), + index: get_free_port(), + ws: get_free_port(), + admin: get_free_port(), + metrics: get_free_port(), } } } // Build a postgres connection string -pub fn make_postgres_uri(unique_id: &u16, postgres_ports: &MappedPorts) -> String { +pub fn make_postgres_uri(db_name: &str, postgres_ports: &MappedPorts) -> String { let port = postgres_ports - .0 .get(&POSTGRESQL_DEFAULT_PORT) .expect("failed to fetch Postgres port from mapped ports"); format!( @@ -110,13 +93,12 @@ pub fn make_postgres_uri(unique_id: &u16, postgres_ports: &MappedPorts) -> Strin password = "password", host = "localhost", port = port, - database_name = postgres_test_database_name(unique_id), + database_name = db_name, ) } pub fn make_ipfs_uri(ipfs_ports: &MappedPorts) -> String { let port = ipfs_ports - .0 .get(&IPFS_DEFAULT_PORT) .expect("failed to fetch IPFS port from mapped ports"); format!("http://{host}:{port}", host = "localhost", port = port) @@ -125,7 +107,6 @@ pub fn make_ipfs_uri(ipfs_ports: &MappedPorts) -> String { // Build a Ganache connection string. Returns the port number and the URI. 
pub fn make_ganache_uri(ganache_ports: &MappedPorts) -> (u16, String) { let port = ganache_ports - .0 .get(&GANACHE_DEFAULT_PORT) .expect("failed to fetch Ganache port from mapped ports"); let uri = format!("test:http://{host}:{port}", host = "localhost", port = port); @@ -136,10 +117,6 @@ pub fn contains_subslice(data: &[T], needle: &[T]) -> bool { data.windows(needle.len()).any(|w| w == needle) } -pub fn postgres_test_database_name(unique_id: &u16) -> String { - format!("test_database_{}", unique_id) -} - /// Returns captured stdout pub fn run_cmd(command: &mut Command) -> String { let program = command.get_program().to_str().unwrap().to_owned(); @@ -148,11 +125,11 @@ pub fn run_cmd(command: &mut Command) -> String { .context(format!("failed to run {}", program)) .unwrap(); println!( - "stdout {}", + "stdout:\n{}", pretty_output(&output.stdout, &format!("[{}:stdout] ", program)) ); println!( - "stderr {}", + "stderr:\n{}", pretty_output(&output.stderr, &format!("[{}:stderr] ", program)) ); diff --git a/tests/src/lib.rs b/tests/src/lib.rs index bdb39eeee34..3a3b4a87659 100644 --- a/tests/src/lib.rs +++ b/tests/src/lib.rs @@ -1,2 +1,3 @@ +pub mod docker_utils; pub mod fixture; pub mod helpers; diff --git a/tests/tests/common/docker.rs b/tests/tests/common/docker.rs deleted file mode 100644 index e2231a4b413..00000000000 --- a/tests/tests/common/docker.rs +++ /dev/null @@ -1,285 +0,0 @@ -use bollard::image::CreateImageOptions; -use bollard::models::HostConfig; -use bollard::{container, Docker}; -use graph_tests::helpers::{contains_subslice, postgres_test_database_name, MappedPorts}; -use std::collections::HashMap; -use tokio::time::{sleep, Duration}; -use tokio_stream::StreamExt; - -const POSTGRES_IMAGE: &'static str = "postgres:latest"; -const IPFS_IMAGE: &'static str = "ipfs/go-ipfs:v0.10.0"; -const GANACHE_IMAGE: &'static str = "trufflesuite/ganache-cli:latest"; -type DockerError = bollard::errors::Error; - -pub async fn pull_images() { - use tokio_stream::StreamMap; - - let client = - Docker::connect_with_local_defaults().expect("Failed to connect to docker daemon."); - - let images = [POSTGRES_IMAGE, IPFS_IMAGE, GANACHE_IMAGE]; - let mut map = StreamMap::new(); - - for image_name in &images { - let options = Some(CreateImageOptions { - from_image: *image_name, - ..Default::default() - }); - let stream = client.create_image(options, None, None); - map.insert(*image_name, stream); - } - - while let Some(message) = map.next().await { - if let (key, Err(msg)) = message { - panic!("Error when pulling docker image for {}: {}", key, msg) - } - } -} - -pub async fn stop_and_remove(client: &Docker, service_name: &str) -> Result<(), DockerError> { - client.kill_container::<&str>(service_name, None).await?; - client.remove_container(service_name, None).await -} - -/// Represents all possible service containers to be spawned -#[derive(Debug)] -pub enum TestContainerService { - Postgres, - Ipfs, - Ganache(u16), -} - -impl TestContainerService { - fn config(&self) -> container::Config<&'static str> { - use TestContainerService::*; - match self { - Postgres => Self::build_postgres_container_config(), - Ipfs => Self::build_ipfs_container_config(), - Ganache(_u32) => Self::build_ganache_container_config(), - } - } - - fn options(&self) -> container::CreateContainerOptions { - container::CreateContainerOptions { name: self.name() } - } - - fn name(&self) -> String { - use TestContainerService::*; - match self { - Postgres => "graph_node_integration_test_postgres".into(), - Ipfs => 
"graph_node_integration_test_ipfs".into(), - Ganache(container_count) => { - format!("graph_node_integration_test_ganache_{}", container_count) - } - } - } - - fn build_postgres_container_config() -> container::Config<&'static str> { - let host_config = HostConfig { - publish_all_ports: Some(true), - ..Default::default() - }; - - container::Config { - image: Some(POSTGRES_IMAGE), - env: Some(vec!["POSTGRES_PASSWORD=password", "POSTGRES_USER=postgres"]), - host_config: Some(host_config), - cmd: Some(vec![ - "postgres", - "-N", - "1000", - "-cshared_preload_libraries=pg_stat_statements", - ]), - ..Default::default() - } - } - - fn build_ipfs_container_config() -> container::Config<&'static str> { - let host_config = HostConfig { - publish_all_ports: Some(true), - ..Default::default() - }; - - container::Config { - image: Some(IPFS_IMAGE), - host_config: Some(host_config), - ..Default::default() - } - } - - fn build_ganache_container_config() -> container::Config<&'static str> { - let host_config = HostConfig { - publish_all_ports: Some(true), - ..Default::default() - }; - - container::Config { - image: Some(GANACHE_IMAGE), - cmd: Some(vec!["-d", "-l", "100000000000", "-g", "1"]), - host_config: Some(host_config), - ..Default::default() - } - } -} - -/// Handles the connection to the docker daemon and keeps track the service running inside it. -pub struct DockerTestClient { - service: TestContainerService, - client: Docker, -} - -impl DockerTestClient { - pub async fn start(service: TestContainerService) -> Result { - let client = - Docker::connect_with_local_defaults().expect("Failed to connect to docker daemon."); - - let docker_test_client = Self { service, client }; - - // try to remove the container if it already exists - let _ = stop_and_remove( - &docker_test_client.client, - &docker_test_client.service.name(), - ) - .await; - - // create docker container - docker_test_client - .client - .create_container( - Some(docker_test_client.service.options()), - docker_test_client.service.config(), - ) - .await?; - - // start docker container - docker_test_client - .client - .start_container::<&'static str>(&docker_test_client.service.name(), None) - .await?; - - Ok(docker_test_client) - } - - pub async fn stop(&self) -> Result<(), DockerError> { - stop_and_remove(&self.client, &self.service.name()).await - } - - pub async fn exposed_ports(&self) -> Result { - use bollard::models::ContainerSummaryInner; - let mut filters = HashMap::new(); - filters.insert("name".to_string(), vec![self.service.name()]); - let options = Some(container::ListContainersOptions { - filters, - limit: Some(1), - ..Default::default() - }); - let results = self.client.list_containers(options).await?; - let ports = match &results.as_slice() { - &[ContainerSummaryInner { - ports: Some(ports), .. - }] => ports, - unexpected_response => panic!( - "Received a unexpected_response from docker API: {:#?}", - unexpected_response - ), - }; - let mapped_ports: MappedPorts = to_mapped_ports(ports.to_vec()); - Ok(mapped_ports) - } - - /// halts execution until a trigger message is detected on stdout or, optionally, - /// waits for a specified amount of time after the message appears. 
- pub async fn wait_for_message( - &self, - trigger_message: &[u8], - hard_wait: &Option<u64>, - ) -> Result<&Self, DockerError> { - // listen to container logs - let mut stream = self.client.logs::<String>( - &self.service.name(), - Some(container::LogsOptions { - follow: true, - stdout: true, - stderr: true, - ..Default::default() - }), - ); - - // halt execution until a message is received - loop { - match stream.next().await { - Some(Ok(container::LogOutput::StdOut { message })) => { - if contains_subslice(&message, &trigger_message) { - break; - } else { - sleep(Duration::from_millis(100)).await; - } - } - Some(Err(error)) => return Err(error), - None => { - panic!("stream ended before expected message could be detected") - } - _ => {} - } - } - - if let Some(seconds) = hard_wait { - sleep(Duration::from_secs(*seconds)).await; - } - Ok(self) - } - - /// Calls `docker exec` on the container to create a test database. - pub async fn create_postgres_database( - docker: &DockerTestClient, - unique_id: &u16, - ) -> Result<(), DockerError> { - use bollard::exec; - - let database_name = postgres_test_database_name(unique_id); - - // 1. Create Exec - let config = exec::CreateExecOptions { - cmd: Some(vec!["createdb", &database_name]), - user: Some("postgres"), - attach_stdout: Some(true), - ..Default::default() - }; - - let message = docker - .client - .create_exec(&docker.service.name(), config) - .await?; - - // 2. Start Exec - let mut stream = docker.client.start_exec(&message.id, None); - while let Some(_) = stream.next().await { /* consume stream */ } - - // 3. Inspecet exec - let inspect = docker.client.inspect_exec(&message.id).await?; - if let Some(0) = inspect.exit_code { - Ok(()) - } else { - panic!("failed to run 'createdb' command using docker exec"); - } - } -} - -fn to_mapped_ports(input: Vec<bollard::models::Port>) -> MappedPorts { - let mut hashmap = HashMap::new(); - - for port in &input { - if let bollard::models::Port { - private_port, - public_port: Some(public_port), - .. - } = port - { - hashmap.insert(*private_port as u16, *public_port as u16); - } - } - if hashmap.is_empty() { - panic!("Container exposed no ports. Input={:?}", input) - } - MappedPorts(hashmap) -} diff --git a/tests/tests/common/mod.rs b/tests/tests/common/mod.rs deleted file mode 100644 index 18c50280380..00000000000 --- a/tests/tests/common/mod.rs +++ /dev/null @@ -1 +0,0 @@ -pub mod docker; diff --git a/tests/tests/integration_tests.rs b/tests/tests/integration_tests.rs new file mode 100644 index 00000000000..2f11094d754 --- /dev/null +++ b/tests/tests/integration_tests.rs @@ -0,0 +1,501 @@ +//! Containerized integration tests. +//! +//! # On the use of [`tokio::join!`] +//! +//! While linear `.await`s look best, sometimes we don't particularly care +//! about the order of execution and we can thus reduce test execution times by +//! `.await`ing in parallel. [`tokio::join!`] and similar macros can help us +//! with that, at the cost of some readability. As a general rule only a few +//! tasks are really worth parallelizing, and applying this trick +//! indiscriminately will only result in messy code and diminishing returns.
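To make the trade-off described in that module doc concrete, here is a minimal, self-contained sketch; the function names and sleep durations are illustrative, not taken from this diff:

use std::time::Duration;

async fn pull_images() {
    tokio::time::sleep(Duration::from_millis(50)).await;
}

async fn build_workspace() {
    tokio::time::sleep(Duration::from_millis(50)).await;
}

#[tokio::main]
async fn main() {
    // Sequential: total latency is the sum of both awaits.
    pull_images().await;
    build_workspace().await;

    // Concurrent: both futures are polled on the same task, so total latency
    // is roughly that of the slower one. No extra tasks are spawned.
    tokio::join!(pull_images(), build_workspace());
}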
+ +use anyhow::Context; +use futures::{StreamExt, TryStreamExt}; +use graph_tests::docker_utils::{pull_service_images, ServiceContainer, ServiceContainerKind}; +use graph_tests::helpers::{ + basename, make_ganache_uri, make_ipfs_uri, make_postgres_uri, pretty_output, GraphNodePorts, + MappedPorts, +}; +use std::fs; +use std::num::NonZeroUsize; +use std::path::{Path, PathBuf}; +use std::sync::Arc; +use std::time::Duration; +use tokio::io::AsyncReadExt; +use tokio::process::{Child, Command}; + +/// All directories containing integration tests to run. +/// +/// Hardcoding these paths seems "wrong", and we very well could obtain this +/// list with some directory listing magic. That would, however, also +/// require us to filter out `node_modules`, support files, etc. Hardly worth +/// it. +pub const INTEGRATION_TEST_DIRS: &[&str] = &[ + "api-version-v0-0-4", + "ganache-reverts", + "host-exports", + "non-fatal-errors", + "overloaded-contract-functions", + "poi-for-failed-subgraph", + "remove-then-update", + "value-roundtrip", +]; + +#[derive(Debug, Clone)] +struct IntegrationTestSettings { + n_parallel_tests: u64, + ganache_hard_wait: Duration, + ipfs_hard_wait: Duration, + postgres_hard_wait: Duration, +} + +impl IntegrationTestSettings { + /// Automatically fills in missing env. vars. with defaults. + /// + /// # Panics + /// + /// Panics if any of the env. vars. is set incorrectly. + pub fn from_env() -> Self { + Self { + n_parallel_tests: parse_numeric_environment_variable("N_CONCURRENT_TESTS").unwrap_or( + // Lots of I/O going on in these tests, so we spawn twice as + // many jobs as suggested. + 2 * std::thread::available_parallelism() + .map(NonZeroUsize::get) + .unwrap_or(2) as u64, + ), + ganache_hard_wait: Duration::from_secs( + parse_numeric_environment_variable("TESTS_GANACHE_HARD_WAIT_SECONDS").unwrap_or(0), + ), + ipfs_hard_wait: Duration::from_secs( + parse_numeric_environment_variable("TESTS_IPFS_HARD_WAIT_SECONDS").unwrap_or(0), + ), + postgres_hard_wait: Duration::from_secs( + parse_numeric_environment_variable("TESTS_POSTGRES_HARD_WAIT_SECONDS").unwrap_or(0), + ), + } + } +} + +/// An aggregator of all configuration and settings required to run a single +/// integration test. +#[derive(Debug)] +struct IntegrationTestRecipe { + postgres_uri: String, + ipfs_uri: String, + ganache_port: u16, + ganache_uri: String, + graph_node_ports: GraphNodePorts, + graph_node_bin: Arc<PathBuf>, + test_directory: PathBuf, +} + +impl IntegrationTestRecipe { + fn test_name(&self) -> String { + basename(&self.test_directory) + } + + fn graph_node_admin_uri(&self) -> String { + format!("http://localhost:{}/", self.graph_node_ports.admin) + } +} + +/// Info about a finished test command +#[derive(Debug)] +struct IntegrationTestResult { + success: bool, + _exit_status_code: Option<i32>, + output: Output, +} + +#[derive(Debug)] +struct Output { + stdout: Option<String>, + stderr: Option<String>, +} + +impl std::fmt::Display for Output { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + if let Some(ref stdout) = self.stdout { + write!(f, "{}", stdout)?; + } + if let Some(ref stderr) = self.stderr { + write!(f, "{}", stderr)?
+ } + Ok(()) + } +} + +// The results of a finished integration test +#[derive(Debug)] +struct IntegrationTestSummary { + test_recipe: IntegrationTestRecipe, + test_command_result: IntegrationTestResult, + graph_node_output: Output, +} + +impl IntegrationTestSummary { + fn print_outcome(&self) { + let status = match self.test_command_result.success { + true => "SUCCESS", + false => "FAILURE", + }; + println!("- Test: {}: {}", status, self.test_recipe.test_name()) + } + + fn print_failure(&self) { + if self.test_command_result.success { + return; + } + let test_name = self.test_recipe.test_name(); + println!("============="); + println!("\nFailed test: {}", test_name); + println!("-------------"); + println!("{:#?}", self.test_recipe); + println!("-------------"); + println!("\nFailed test command output:"); + println!("---------------------------"); + println!("{}", self.test_command_result.output); + println!("--------------------------"); + println!("graph-node command output:"); + println!("--------------------------"); + println!("{}", self.graph_node_output); + } +} + +/// The main test entrypoint. +#[tokio::test] +async fn parallel_integration_tests() -> anyhow::Result<()> { + let test_settings = IntegrationTestSettings::from_env(); + + let current_working_dir = + std::env::current_dir().context("failed to identify working directory")?; + let yarn_workspace_dir = current_working_dir.join("integration-tests"); + let test_dirs = INTEGRATION_TEST_DIRS + .iter() + .map(|p| yarn_workspace_dir.join(PathBuf::from(p))) + .collect::<Vec<PathBuf>>(); + + // Show discovered tests. + println!("Found {} integration test(s):", test_dirs.len()); + for dir in &test_dirs { + println!(" - {}", basename(dir)); + } + + tokio::join!( + // Pull the required Docker images. + pull_service_images(), + // Run `yarn` command to build workspace. + run_yarn_command(&yarn_workspace_dir), + ); + + println!("Starting PostgreSQL and IPFS containers..."); + + // Not only do we start the containers, but we also need to wait for + // them to be up and running and ready to accept connections. + let (postgres, ipfs) = tokio::try_join!( + ServiceDependency::start( + ServiceContainerKind::Postgres, + "database system is ready to accept connections", + test_settings.postgres_hard_wait + ), + ServiceDependency::start( + ServiceContainerKind::Ipfs, + "Daemon is ready", + test_settings.ipfs_hard_wait + ), + )?; + + println!( + "Containers are ready! Running tests with N_CONCURRENT_TESTS={} ...", + test_settings.n_parallel_tests + ); + + let graph_node = Arc::new( + fs::canonicalize("../target/debug/graph-node") + .context("failed to infer `graph-node` program location. (Was it built already?)")?, + ); + + let stream = tokio_stream::iter(test_dirs) + .map(|dir| { + run_integration_test( + dir, + postgres.clone(), + ipfs.clone(), + graph_node.clone(), + test_settings.ganache_hard_wait, + ) + }) + .buffered(test_settings.n_parallel_tests as usize); + + let test_results: Vec<IntegrationTestSummary> = stream.try_collect().await?; + let failed = test_results.iter().any(|r| !r.test_command_result.success); + + // All tests have finished; we don't need the containers anymore.
+ tokio::try_join!( + async { + postgres + .container + .stop() + .await + .context("failed to stop container with Postgres") + }, + async { + ipfs.container + .stop() + .await + .context("failed to stop container with IPFS") + }, + )?; + + // print failures + for failed_test in test_results + .iter() + .filter(|t| !t.test_command_result.success) + { + failed_test.print_failure() + } + + // print test result summary + println!("\nTest results:"); + for test_result in &test_results { + test_result.print_outcome() + } + + if failed { + Err(anyhow::anyhow!("Some tests have failed")) + } else { + Ok(()) + } +} + +#[derive(Clone)] +struct ServiceDependency { + container: Arc<ServiceContainer>, + ports: Arc<MappedPorts>, +} + +impl ServiceDependency { + async fn start( + service: ServiceContainerKind, + wait_msg: &str, + hard_wait: Duration, + ) -> anyhow::Result<Self> { + let service = ServiceContainer::start(service).await.context(format!( + "Failed to start container service `{}`", + service.name() + ))?; + + service + .wait_for_message(wait_msg.as_bytes(), hard_wait) + .await + .context(format!( + "failed to wait for {} container to be ready to accept connections", + service.container_name() + ))?; + + let ports = service.exposed_ports().await.context(format!( + "failed to obtain exposed ports for the `{}` container", + service.container_name() + ))?; + + Ok(Self { + container: Arc::new(service), + ports: Arc::new(ports), + }) + } +} + +/// Prepare and run the integration test +async fn run_integration_test( + test_directory: PathBuf, + postgres: ServiceDependency, + ipfs: ServiceDependency, + graph_node_bin: Arc<PathBuf>, + ganache_hard_wait: Duration, +) -> anyhow::Result<IntegrationTestSummary> { + let db_name = + format!("{}-{}", basename(&test_directory), uuid::Uuid::new_v4()).replace("-", "_"); + + let (ganache, _) = tokio::try_join!( + // Start a dedicated Ganache container for this test. + async { + ServiceDependency::start( + ServiceContainerKind::Ganache, + "Listening on ", + ganache_hard_wait, + ) + .await + .context("failed to start Ganache container") + }, + // PostgreSQL is up and running, but we still need to create the database. + async { + ServiceContainer::create_postgres_database(&postgres.container, &db_name) + .await + .context("failed to create the test database.") + } + )?; + + // Build URIs. + let postgres_uri = { make_postgres_uri(&db_name, &postgres.ports) }; + let ipfs_uri = make_ipfs_uri(&ipfs.ports); + let (ganache_port, ganache_uri) = make_ganache_uri(&ganache.ports); + + let test_recipe = IntegrationTestRecipe { + postgres_uri, + ipfs_uri, + ganache_uri, + ganache_port, + graph_node_bin, + graph_node_ports: GraphNodePorts::random_free(), + test_directory, + }; + + // Spawn graph-node. + let mut graph_node_child_command = run_graph_node(&test_recipe)?; + + println!("Test started: {}", basename(&test_recipe.test_directory)); + let result = run_test_command(&test_recipe).await?; + + let (graph_node_output, _) = tokio::try_join!( + async { + // Stop graph-node and read its output. + stop_graph_node(&mut graph_node_child_command) + .await + .context("failed to stop graph-node") + }, + async { + // Stop Ganache.
+ ganache + .container + .stop() + .await + .context("failed to stop container service for Ganache") + } + )?; + + Ok(IntegrationTestSummary { + test_recipe, + test_command_result: result, + graph_node_output, + }) +} + +/// Runs a command for an integration test +async fn run_test_command( + test_recipe: &IntegrationTestRecipe, +) -> anyhow::Result<IntegrationTestResult> { + let output = Command::new("yarn") + .arg("test") + .env("GANACHE_TEST_PORT", test_recipe.ganache_port.to_string()) + .env("GRAPH_NODE_ADMIN_URI", test_recipe.graph_node_admin_uri()) + .env( + "GRAPH_NODE_HTTP_PORT", + test_recipe.graph_node_ports.http.to_string(), + ) + .env( + "GRAPH_NODE_INDEX_PORT", + test_recipe.graph_node_ports.index.to_string(), + ) + .env("IPFS_URI", &test_recipe.ipfs_uri) + .current_dir(&test_recipe.test_directory) + .output() + .await + .context("failed to run test command")?; + + let test_name = test_recipe.test_name(); + let stdout_tag = format!("[{}:stdout] ", test_name); + let stderr_tag = format!("[{}:stderr] ", test_name); + + Ok(IntegrationTestResult { + _exit_status_code: output.status.code(), + success: output.status.success(), + output: Output { + stdout: Some(pretty_output(&output.stdout, &stdout_tag)), + stderr: Some(pretty_output(&output.stderr, &stderr_tag)), + }, + }) +} + +fn run_graph_node(recipe: &IntegrationTestRecipe) -> anyhow::Result<Child> { + use std::process::Stdio; + + let mut command = Command::new(recipe.graph_node_bin.as_os_str()); + command + .stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .arg("--postgres-url") + .arg(&recipe.postgres_uri) + .arg("--ethereum-rpc") + .arg(&recipe.ganache_uri) + .arg("--ipfs") + .arg(&recipe.ipfs_uri) + .arg("--http-port") + .arg(recipe.graph_node_ports.http.to_string()) + .arg("--index-node-port") + .arg(recipe.graph_node_ports.index.to_string()) + .arg("--ws-port") + .arg(recipe.graph_node_ports.ws.to_string()) + .arg("--admin-port") + .arg(recipe.graph_node_ports.admin.to_string()) + .arg("--metrics-port") + .arg(recipe.graph_node_ports.metrics.to_string()); + + command + .spawn() + .context("failed to start graph-node command.") +} + +async fn stop_graph_node(child: &mut Child) -> anyhow::Result<Output> { + child.kill().await.context("Failed to kill graph-node")?; + + // capture stdio + let stdout = match child.stdout.take() { + Some(mut data) => Some(process_stdio(&mut data, "[graph-node:stdout] ").await?), + None => None, + }; + let stderr = match child.stderr.take() { + Some(mut data) => Some(process_stdio(&mut data, "[graph-node:stderr] ").await?), + None => None, + }; + + Ok(Output { stdout, stderr }) +} + +async fn process_stdio<T: AsyncReadExt + Unpin>( + stdio: &mut T, + prefix: &str, +) -> anyhow::Result<String> { + let mut buffer: Vec<u8> = Vec::new(); + stdio + .read_to_end(&mut buffer) + .await + .context("failed to read stdio")?; + Ok(pretty_output(&buffer, prefix)) +} + +/// run yarn to build everything +async fn run_yarn_command(base_directory: &impl AsRef<Path>) { + let timer = std::time::Instant::now(); + println!("Running `yarn` command in integration tests root directory."); + let output = Command::new("yarn") + .current_dir(base_directory) + .output() + .await + .expect("failed to run yarn command"); + + if output.status.success() { + println!("`yarn` command finished in {}s", timer.elapsed().as_secs()); + return; + } + println!("Yarn command failed."); + println!("{}", pretty_output(&output.stdout, "[yarn:stdout]")); + println!("{}", pretty_output(&output.stderr, "[yarn:stderr]")); + panic!("Yarn command failed.") +} + +fn
parse_numeric_environment_variable(environment_variable_name: &str) -> Option { + std::env::var(environment_variable_name) + .ok() + .and_then(|x| x.parse().ok()) +} diff --git a/tests/tests/parallel_tests.rs b/tests/tests/parallel_tests.rs deleted file mode 100644 index 976a502e3dc..00000000000 --- a/tests/tests/parallel_tests.rs +++ /dev/null @@ -1,434 +0,0 @@ -mod common; -use anyhow::Context; -use common::docker::{pull_images, DockerTestClient, TestContainerService}; -use futures::StreamExt; -use graph_tests::helpers::{ - basename, get_unique_ganache_counter, get_unique_postgres_counter, make_ganache_uri, - make_ipfs_uri, make_postgres_uri, pretty_output, GraphNodePorts, MappedPorts, -}; -use std::fs; -use std::path::{Path, PathBuf}; -use std::sync::Arc; -use tokio::io::AsyncReadExt; -use tokio::process::{Child, Command}; - -const DEFAULT_N_CONCURRENT_TESTS: usize = 15; - -lazy_static::lazy_static! { - static ref GANACHE_HARD_WAIT_SECONDS: Option = - parse_numeric_environment_variable("TESTS_GANACHE_HARD_WAIT_SECONDS"); - static ref IPFS_HARD_WAIT_SECONDS: Option = - parse_numeric_environment_variable("TESTS_IPFS_HARD_WAIT_SECONDS"); - static ref POSTGRES_HARD_WAIT_SECONDS: Option = - parse_numeric_environment_variable("TESTS_POSTGRES_HARD_WAIT_SECONDS"); -} - -/// All integration tests subdirectories to run -pub const INTEGRATION_TESTS_DIRECTORIES: [&str; 9] = [ - "api-version-v0-0-4", - "fatal-error", - "ganache-reverts", - "host-exports", - "non-fatal-errors", - "overloaded-contract-functions", - "poi-for-failed-subgraph", - "remove-then-update", - "value-roundtrip", -]; - -/// Contains all information a test command needs -#[derive(Debug)] -struct IntegrationTestSetup { - postgres_uri: String, - ipfs_uri: String, - ganache_port: u16, - ganache_uri: String, - graph_node_ports: GraphNodePorts, - graph_node_bin: Arc, - test_directory: PathBuf, -} - -impl IntegrationTestSetup { - fn test_name(&self) -> String { - basename(&self.test_directory) - } - - fn graph_node_admin_uri(&self) -> String { - let ws_port = self.graph_node_ports.admin; - format!("http://localhost:{}/", ws_port) - } -} - -/// Info about a finished test command -#[derive(Debug)] -struct TestCommandResults { - success: bool, - _exit_code: Option, - stdout: String, - stderr: String, -} - -#[derive(Debug)] -struct StdIO { - stdout: Option, - stderr: Option, -} -impl std::fmt::Display for StdIO { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - if let Some(ref stdout) = self.stdout { - write!(f, "{}", stdout)?; - } - if let Some(ref stderr) = self.stderr { - write!(f, "{}", stderr)? 
- } - Ok(()) - } -} - -// The results of a finished integration test -#[derive(Debug)] -struct IntegrationTestResult { - test_setup: IntegrationTestSetup, - test_command_results: TestCommandResults, - graph_node_stdio: StdIO, -} - -impl IntegrationTestResult { - fn print_outcome(&self) { - let status = match self.test_command_results.success { - true => "SUCCESS", - false => "FAILURE", - }; - println!("- Test: {}: {}", status, self.test_setup.test_name()) - } - - fn print_failure(&self) { - if self.test_command_results.success { - return; - } - let test_name = self.test_setup.test_name(); - println!("============="); - println!("\nFailed test: {}", test_name); - println!("-------------"); - println!("{:#?}", self.test_setup); - println!("-------------"); - println!("\nFailed test command output:"); - println!("---------------------------"); - println!("{}", self.test_command_results.stdout); - println!("{}", self.test_command_results.stderr); - println!("--------------------------"); - println!("graph-node command output:"); - println!("--------------------------"); - println!("{}", self.graph_node_stdio); - } -} - -/// The main test entrypoint -#[tokio::test] -async fn parallel_integration_tests() -> anyhow::Result<()> { - // use a environment variable for limiting the number of concurrent tests - let n_parallel_tests: usize = std::env::var("N_CONCURRENT_TESTS") - .ok() - .and_then(|x| x.parse().ok()) - .unwrap_or(DEFAULT_N_CONCURRENT_TESTS); - - let current_working_directory = - std::env::current_dir().context("failed to identify working directory")?; - let integration_tests_root_directory = current_working_directory.join("integration-tests"); - - // pull required docker images - pull_images().await; - - let test_directories = INTEGRATION_TESTS_DIRECTORIES - .iter() - .map(|ref p| integration_tests_root_directory.join(PathBuf::from(p))) - .collect::>(); - - // Show discovered tests - println!("Found {} integration tests:", test_directories.len()); - for dir in &test_directories { - println!(" - {}", basename(dir)); - } - - // run `yarn` command to build workspace - run_yarn_command(&integration_tests_root_directory).await; - - // start docker containers for Postgres and IPFS and wait for them to be ready - let postgres = Arc::new( - DockerTestClient::start(TestContainerService::Postgres) - .await - .context("failed to start container service for Postgres.")?, - ); - postgres - .wait_for_message( - b"database system is ready to accept connections", - &*POSTGRES_HARD_WAIT_SECONDS, - ) - .await - .context("failed to wait for Postgres container to be ready to accept connections")?; - - let ipfs = DockerTestClient::start(TestContainerService::Ipfs) - .await - .context("failed to start container service for IPFS.")?; - ipfs.wait_for_message(b"Daemon is ready", &*IPFS_HARD_WAIT_SECONDS) - .await - .context("failed to wait for Ipfs container to be ready to accept connections")?; - - let postgres_ports = Arc::new( - postgres - .exposed_ports() - .await - .context("failed to obtain exposed ports for the Postgres container")?, - ); - let ipfs_ports = Arc::new( - ipfs.exposed_ports() - .await - .context("failed to obtain exposed ports for the IPFS container")?, - ); - - let graph_node = Arc::new( - fs::canonicalize("../target/debug/graph-node") - .context("failed to infer `graph-node` program location. 
(Was it built already?)")?, - ); - - // run tests - let mut test_results = Vec::new(); - - let mut stream = tokio_stream::iter(test_directories) - .map(|dir| { - run_integration_test( - dir.clone(), - postgres.clone(), - postgres_ports.clone(), - ipfs_ports.clone(), - graph_node.clone(), - ) - }) - .buffered(n_parallel_tests); - - let mut failed = false; - while let Some(test_result) = stream.next().await { - let test_result = test_result?; - if !test_result.test_command_results.success { - failed = true; - } - test_results.push(test_result); - } - - // Stop containers. - postgres - .stop() - .await - .context("failed to stop container service for Postgres")?; - ipfs.stop() - .await - .context("failed to stop container service for IPFS")?; - - // print failures - for failed_test in test_results - .iter() - .filter(|t| !t.test_command_results.success) - { - failed_test.print_failure() - } - - // print test result summary - println!("\nTest results:"); - for test_result in &test_results { - test_result.print_outcome() - } - - if failed { - Err(anyhow::anyhow!("Some tests have failed")) - } else { - Ok(()) - } -} - -/// Prepare and run the integration test -async fn run_integration_test( - test_directory: PathBuf, - postgres_docker: Arc, - postgres_ports: Arc, - ipfs_ports: Arc, - graph_node_bin: Arc, -) -> anyhow::Result { - // start a dedicated ganache container for this test - let unique_ganache_counter = get_unique_ganache_counter(); - let ganache = DockerTestClient::start(TestContainerService::Ganache(unique_ganache_counter)) - .await - .context("failed to start container service for Ganache.")?; - ganache - .wait_for_message(b"Listening on ", &*GANACHE_HARD_WAIT_SECONDS) - .await - .context("failed to wait for Ganache container to be ready to accept connections")?; - - let ganache_ports: MappedPorts = ganache - .exposed_ports() - .await - .context("failed to obtain exposed ports for Ganache container")?; - - // build URIs - let postgres_unique_id = get_unique_postgres_counter(); - - let postgres_uri = make_postgres_uri(&postgres_unique_id, &postgres_ports); - let ipfs_uri = make_ipfs_uri(&ipfs_ports); - let (ganache_port, ganache_uri) = make_ganache_uri(&ganache_ports); - - // create test database - DockerTestClient::create_postgres_database(&postgres_docker, &postgres_unique_id) - .await - .context("failed to create the test database.")?; - - // prepare to run test comand - let test_setup = IntegrationTestSetup { - postgres_uri, - ipfs_uri, - ganache_uri, - ganache_port, - graph_node_bin, - graph_node_ports: GraphNodePorts::get_ports(), - test_directory, - }; - - // spawn graph-node - let mut graph_node_child_command = run_graph_node(&test_setup).await?; - - println!("Test started: {}", basename(&test_setup.test_directory)); - let test_command_results = run_test_command(&test_setup).await?; - - // stop graph-node - - let graph_node_stdio = stop_graph_node(&mut graph_node_child_command).await?; - // stop ganache container - ganache - .stop() - .await - .context("failed to stop container service for Ganache")?; - - Ok(IntegrationTestResult { - test_setup, - test_command_results, - graph_node_stdio, - }) -} - -/// Runs a command for a integration test -async fn run_test_command(test_setup: &IntegrationTestSetup) -> anyhow::Result { - let output = Command::new("yarn") - .arg("test") - .env("GANACHE_TEST_PORT", test_setup.ganache_port.to_string()) - .env("GRAPH_NODE_ADMIN_URI", test_setup.graph_node_admin_uri()) - .env( - "GRAPH_NODE_HTTP_PORT", - 
test_setup.graph_node_ports.http.to_string(), - ) - .env( - "GRAPH_NODE_INDEX_PORT", - test_setup.graph_node_ports.index.to_string(), - ) - .env("IPFS_URI", &test_setup.ipfs_uri) - .current_dir(&test_setup.test_directory) - .output() - .await - .context("failed to run test command")?; - - let test_name = test_setup.test_name(); - let stdout_tag = format!("[{}:stdout] ", test_name); - let stderr_tag = format!("[{}:stderr] ", test_name); - - Ok(TestCommandResults { - success: output.status.success(), - _exit_code: output.status.code(), - stdout: pretty_output(&output.stdout, &stdout_tag), - stderr: pretty_output(&output.stderr, &stderr_tag), - }) -} -async fn run_graph_node(test_setup: &IntegrationTestSetup) -> anyhow::Result { - use std::process::Stdio; - - let mut command = Command::new(test_setup.graph_node_bin.as_os_str()); - command - .stdout(Stdio::piped()) - .stderr(Stdio::piped()) - // postgres - .arg("--postgres-url") - .arg(&test_setup.postgres_uri) - // ethereum - .arg("--ethereum-rpc") - .arg(&test_setup.ganache_uri) - // ipfs - .arg("--ipfs") - .arg(&test_setup.ipfs_uri) - // http port - .arg("--http-port") - .arg(test_setup.graph_node_ports.http.to_string()) - // index node port - .arg("--index-node-port") - .arg(test_setup.graph_node_ports.index.to_string()) - // ws port - .arg("--ws-port") - .arg(test_setup.graph_node_ports.ws.to_string()) - // admin port - .arg("--admin-port") - .arg(test_setup.graph_node_ports.admin.to_string()) - // metrics port - .arg("--metrics-port") - .arg(test_setup.graph_node_ports.metrics.to_string()); - - command - .spawn() - .context("failed to start graph-node command.") -} - -async fn stop_graph_node(child: &mut Child) -> anyhow::Result { - child.kill().await.context("Failed to kill graph-node")?; - - // capture stdio - let stdout = match child.stdout.take() { - Some(mut data) => Some(process_stdio(&mut data, "[graph-node:stdout] ").await?), - None => None, - }; - let stderr = match child.stderr.take() { - Some(mut data) => Some(process_stdio(&mut data, "[graph-node:stderr] ").await?), - None => None, - }; - - Ok(StdIO { stdout, stderr }) -} - -async fn process_stdio( - stdio: &mut T, - prefix: &str, -) -> anyhow::Result { - let mut buffer: Vec = Vec::new(); - stdio - .read_to_end(&mut buffer) - .await - .context("failed to read stdio")?; - Ok(pretty_output(&buffer, prefix)) -} - -/// run yarn to build everything -async fn run_yarn_command(base_directory: &impl AsRef) { - println!("Running `yarn` command in integration tests root directory."); - let output = Command::new("yarn") - .current_dir(base_directory) - .output() - .await - .expect("failed to run yarn command"); - - if output.status.success() { - return; - } - println!("Yarn command failed."); - println!("{}", pretty_output(&output.stdout, "[yarn:stdout]")); - println!("{}", pretty_output(&output.stderr, "[yarn:stderr]")); - panic!("Yarn command failed.") -} - -fn parse_numeric_environment_variable(environment_variable_name: &str) -> Option { - std::env::var(environment_variable_name) - .ok() - .and_then(|x| x.parse().ok()) -} diff --git a/tests/tests/runner.rs b/tests/tests/runner.rs deleted file mode 100644 index 133be063034..00000000000 --- a/tests/tests/runner.rs +++ /dev/null @@ -1,153 +0,0 @@ -use std::sync::Arc; - -use cid::Cid; -use graph::blockchain::{Block, BlockPtr}; -use graph::object; -use graph::prelude::ethabi::ethereum_types::H256; -use graph::prelude::{SubgraphAssignmentProvider, SubgraphName}; -use graph_tests::fixture::ethereum::{chain, empty_block, genesis}; -use 
graph_tests::fixture::{self, stores, test_ptr}; - -#[tokio::test] -async fn data_source_revert() -> anyhow::Result<()> { - let stores = stores("./integration-tests/config.simple.toml").await; - - let subgraph_name = SubgraphName::new("data-source-revert").unwrap(); - let hash = { - let test_dir = format!("./integration-tests/{}", subgraph_name); - fixture::build_subgraph(&test_dir).await - }; - - let blocks = { - let block0 = genesis(); - let block1 = empty_block(block0.ptr(), test_ptr(1)); - let block1_reorged_ptr = BlockPtr { - number: 1, - hash: H256::from_low_u64_be(12).into(), - }; - let block1_reorged = empty_block(block0.ptr(), block1_reorged_ptr.clone()); - let block2 = empty_block(block1_reorged_ptr, test_ptr(2)); - let block3 = empty_block(block2.ptr(), test_ptr(3)); - let block4 = empty_block(block3.ptr(), test_ptr(4)); - vec![block0, block1, block1_reorged, block2, block3, block4] - }; - - let chain = Arc::new(chain(blocks.clone(), &stores).await); - let ctx = fixture::setup(subgraph_name.clone(), &hash, &stores, chain.clone(), None).await; - - let stop_block = test_ptr(2); - ctx.start_and_sync_to(stop_block).await; - ctx.provider.stop(ctx.deployment.clone()).await.unwrap(); - - // Test loading data sources from DB. - let stop_block = test_ptr(3); - ctx.start_and_sync_to(stop_block).await; - - // Test grafted version - let subgraph_name = SubgraphName::new("data-source-revert-grafted").unwrap(); - let hash = fixture::build_subgraph_with_yarn_cmd( - "./integration-tests/data-source-revert", - "deploy:test-grafted", - ) - .await; - let graft_block = Some(test_ptr(3)); - let ctx = fixture::setup(subgraph_name.clone(), &hash, &stores, chain, graft_block).await; - let stop_block = test_ptr(4); - ctx.start_and_sync_to(stop_block).await; - - let query_res = ctx - .query(r#"{ dataSourceCount(id: "4") { id, count } }"#) - .await - .unwrap(); - - // TODO: The semantically correct value for `count` would be 5. But because the test fixture - // uses a `NoopTriggersAdapter` the data sources are not reprocessed in the block in which they - // are created. - assert_eq!( - query_res, - Some(object! 
{ dataSourceCount: object!{ id: "4", count: 4 } }) - ); - - Ok(()) -} - -#[tokio::test] -async fn typename() -> anyhow::Result<()> { - let subgraph_name = SubgraphName::new("typename").unwrap(); - - let hash = { - let test_dir = format!("./integration-tests/{}", subgraph_name); - fixture::build_subgraph(&test_dir).await - }; - - let blocks = { - let block_0 = genesis(); - let block_1 = empty_block(block_0.ptr(), test_ptr(1)); - let block_1_reorged_ptr = BlockPtr { - number: 1, - hash: H256::from_low_u64_be(12).into(), - }; - let block_1_reorged = empty_block(block_0.ptr(), block_1_reorged_ptr); - let block_2 = empty_block(block_1_reorged.ptr(), test_ptr(2)); - let block_3 = empty_block(block_2.ptr(), test_ptr(3)); - vec![block_0, block_1, block_1_reorged, block_2, block_3] - }; - - let stop_block = blocks.last().unwrap().block.ptr(); - - let stores = stores("./integration-tests/config.simple.toml").await; - let chain = Arc::new(chain(blocks, &stores).await); - let ctx = fixture::setup(subgraph_name.clone(), &hash, &stores, chain, None).await; - - ctx.start_and_sync_to(stop_block).await; - - Ok(()) -} - -#[tokio::test] -async fn file_data_sources() { - let stores = stores("./integration-tests/config.simple.toml").await; - - let subgraph_name = SubgraphName::new("file-data-sources").unwrap(); - let hash = { - let test_dir = format!("./integration-tests/{}", subgraph_name); - fixture::build_subgraph(&test_dir).await - }; - - let blocks = { - let block_0 = genesis(); - let block_1 = empty_block(block_0.ptr(), test_ptr(1)); - let block_2 = empty_block(block_1.ptr(), test_ptr(2)); - vec![block_0, block_1, block_2] - }; - let stop_block = test_ptr(1); - let chain = Arc::new(chain(blocks, &stores).await); - let ctx = fixture::setup(subgraph_name.clone(), &hash, &stores, chain, None).await; - ctx.start_and_sync_to(stop_block).await; - - // CID QmVkvoPGi9jvvuxsHDVJDgzPEzagBaWSZRYoRDzU244HjZ is the file - // `file-data-sources/abis/Contract.abi` after being processed by graph-cli. - let id = format!( - "0x{}", - hex::encode( - Cid::try_from("QmVkvoPGi9jvvuxsHDVJDgzPEzagBaWSZRYoRDzU244HjZ") - .unwrap() - .to_bytes(), - ) - ); - - let query_res = ctx - .query(&format!(r#"{{ ipfsFile(id: "{id}") {{ id, content }} }}"#,)) - .await - .unwrap(); - - assert_eq!( - query_res, - Some(object! { ipfsFile: object!{ id: id , content: "[]" } }) - ); - - // Test loading offchain data sources from DB. 
- ctx.provider.stop(ctx.deployment.clone()).await.unwrap(); - let stop_block = test_ptr(2); - ctx.start_and_sync_to(stop_block).await; -} diff --git a/tests/tests/runner_tests.rs b/tests/tests/runner_tests.rs new file mode 100644 index 00000000000..f3baf8a6725 --- /dev/null +++ b/tests/tests/runner_tests.rs @@ -0,0 +1,500 @@ +use std::marker::PhantomData; +use std::process::Command; +use std::sync::atomic::{self, AtomicBool}; +use std::sync::Arc; +use std::time::Duration; + +use assert_json_diff::assert_json_eq; +use graph::blockchain::block_stream::BlockWithTriggers; +use graph::blockchain::{Block, BlockPtr, Blockchain}; +use graph::data::subgraph::schema::{SubgraphError, SubgraphHealth}; +use graph::data_source::CausalityRegion; +use graph::env::EnvVars; +use graph::ipfs_client::IpfsClient; +use graph::object; +use graph::prelude::ethabi::ethereum_types::H256; +use graph::prelude::{ + CheapClone, DeploymentHash, SubgraphAssignmentProvider, SubgraphName, SubgraphStore, +}; +use graph_tests::fixture::ethereum::{chain, empty_block, genesis, push_test_log}; +use graph_tests::fixture::{ + self, stores, test_ptr, test_ptr_reorged, MockAdapterSelector, NoopAdapterSelector, Stores, +}; +use graph_tests::helpers::run_cmd; +use slog::{o, Discard, Logger}; + +struct RunnerTestRecipe { + stores: Stores, + subgraph_name: SubgraphName, + hash: DeploymentHash, +} + +impl RunnerTestRecipe { + async fn new(subgraph_name: &str) -> Self { + let subgraph_name = SubgraphName::new(subgraph_name).unwrap(); + let test_dir = format!("./runner-tests/{}", subgraph_name); + + let (stores, hash) = tokio::join!( + stores("./runner-tests/config.simple.toml"), + build_subgraph(&test_dir) + ); + + Self { + stores, + subgraph_name, + hash, + } + } +} + +#[tokio::test] +async fn data_source_revert() -> anyhow::Result<()> { + let RunnerTestRecipe { + stores, + subgraph_name, + hash, + } = RunnerTestRecipe::new("data-source-revert").await; + + let blocks = { + let block0 = genesis(); + let block1 = empty_block(block0.ptr(), test_ptr(1)); + let block1_reorged_ptr = BlockPtr { + number: 1, + hash: H256::from_low_u64_be(12).into(), + }; + let block1_reorged = empty_block(block0.ptr(), block1_reorged_ptr.clone()); + let block2 = empty_block(block1_reorged_ptr, test_ptr(2)); + let block3 = empty_block(block2.ptr(), test_ptr(3)); + let block4 = empty_block(block3.ptr(), test_ptr(4)); + vec![block0, block1, block1_reorged, block2, block3, block4] + }; + + let chain = chain(blocks.clone(), &stores, None).await; + let ctx = fixture::setup(subgraph_name.clone(), &hash, &stores, &chain, None, None).await; + + let stop_block = test_ptr(2); + ctx.start_and_sync_to(stop_block).await; + ctx.provider.stop(ctx.deployment.clone()).await.unwrap(); + + // Test loading data sources from DB. + let stop_block = test_ptr(3); + ctx.start_and_sync_to(stop_block).await; + + // Test grafted version + let subgraph_name = SubgraphName::new("data-source-revert-grafted").unwrap(); + let hash = + build_subgraph_with_yarn_cmd("./runner-tests/data-source-revert", "deploy:test-grafted") + .await; + let graft_block = Some(test_ptr(3)); + let ctx = fixture::setup( + subgraph_name.clone(), + &hash, + &stores, + &chain, + graft_block, + None, + ) + .await; + let stop_block = test_ptr(4); + ctx.start_and_sync_to(stop_block).await; + + let query_res = ctx + .query(r#"{ dataSourceCount(id: "4") { id, count } }"#) + .await + .unwrap(); + + // TODO: The semantically correct value for `count` would be 5. 
But because the test fixture
+    // uses a `NoopTriggersAdapter` the data sources are not reprocessed in the block in which they
+    // are created.
+    assert_eq!(
+        query_res,
+        Some(object! { dataSourceCount: object!{ id: "4", count: 4 } })
+    );
+
+    Ok(())
+}
+
+#[tokio::test]
+async fn typename() -> anyhow::Result<()> {
+    let RunnerTestRecipe {
+        stores,
+        subgraph_name,
+        hash,
+    } = RunnerTestRecipe::new("typename").await;
+
+    let blocks = {
+        let block_0 = genesis();
+        let block_1 = empty_block(block_0.ptr(), test_ptr(1));
+        let block_1_reorged_ptr = BlockPtr {
+            number: 1,
+            hash: H256::from_low_u64_be(12).into(),
+        };
+        let block_1_reorged = empty_block(block_0.ptr(), block_1_reorged_ptr);
+        let block_2 = empty_block(block_1_reorged.ptr(), test_ptr(2));
+        let block_3 = empty_block(block_2.ptr(), test_ptr(3));
+        vec![block_0, block_1, block_1_reorged, block_2, block_3]
+    };
+
+    let stop_block = blocks.last().unwrap().block.ptr();
+
+    let chain = chain(blocks, &stores, None).await;
+    let ctx = fixture::setup(subgraph_name.clone(), &hash, &stores, &chain, None, None).await;
+
+    ctx.start_and_sync_to(stop_block).await;
+
+    Ok(())
+}
+
+#[tokio::test]
+async fn file_data_sources() {
+    let RunnerTestRecipe {
+        stores,
+        subgraph_name,
+        hash,
+    } = RunnerTestRecipe::new("file-data-sources").await;
+
+    let blocks = {
+        let block_0 = genesis();
+        let block_1 = empty_block(block_0.ptr(), test_ptr(1));
+        let block_2 = empty_block(block_1.ptr(), test_ptr(2));
+        let block_3 = empty_block(block_2.ptr(), test_ptr(3));
+        let block_4 = empty_block(block_3.ptr(), test_ptr(4));
+        let mut block_5 = empty_block(block_4.ptr(), test_ptr(5));
+        push_test_log(&mut block_5, "createFile2");
+        vec![block_0, block_1, block_2, block_3, block_4, block_5]
+    };
+
+    // This test assumes the file data sources will be processed in the same block in which they are
+    // created. But the test might fail due to a race condition if for some reason it takes longer
+    // than expected to fetch the file from IPFS. The sleep here will conveniently happen after the
+    // data source is added to the offchain monitor but before the monitor is checked, in an
+    // attempt to ensure the monitor has enough time to fetch the file.
+    let adapter_selector = NoopAdapterSelector {
+        x: PhantomData,
+        triggers_in_block_sleep: Duration::from_millis(150),
+    };
+    let chain = chain(blocks.clone(), &stores, Some(Arc::new(adapter_selector))).await;
+    let ctx = fixture::setup(subgraph_name.clone(), &hash, &stores, &chain, None, None).await;
+    ctx.start_and_sync_to(test_ptr(1)).await;
+
+    // CID of `file-data-sources/abis/Contract.abi` after being processed by graph-cli.
+    let id = "QmQ2REmceVtzawp7yrnxLQXgNNCtFHEnig6fL9aqE1kcWq";
+    let content_bytes = ctx.ipfs.cat_all(id, Duration::from_secs(10)).await.unwrap();
+    let content = String::from_utf8(content_bytes.into()).unwrap();
+    let query_res = ctx
+        .query(&format!(r#"{{ ipfsFile(id: "{id}") {{ id, content }} }}"#,))
+        .await
+        .unwrap();
+
+    assert_json_eq!(
+        query_res,
+        Some(object! { ipfsFile: object!{ id: id.clone(), content: content.clone() } })
+    );
+
+    // Assert that no duplicate data sources are created.
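+    // Loading the dynamic data sources straight from the store (below) verifies what was
+    // actually persisted, not just the runner's in-memory state.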
+    ctx.start_and_sync_to(test_ptr(2)).await;
+
+    let store = ctx.store.cheap_clone();
+    let writable = store
+        .writable(ctx.logger.clone(), ctx.deployment.id)
+        .await
+        .unwrap();
+    let datasources = writable.load_dynamic_data_sources(vec![]).await.unwrap();
+    assert!(datasources.len() == 1);
+
+    ctx.start_and_sync_to(test_ptr(3)).await;
+
+    let query_res = ctx
+        .query(&format!(r#"{{ ipfsFile1(id: "{id}") {{ id, content }} }}"#,))
+        .await
+        .unwrap();
+
+    assert_json_eq!(
+        query_res,
+        Some(object! { ipfsFile1: object!{ id: id, content: content } })
+    );
+
+    ctx.start_and_sync_to(test_ptr(4)).await;
+    let writable = ctx
+        .store
+        .clone()
+        .writable(ctx.logger.clone(), ctx.deployment.id.clone())
+        .await
+        .unwrap();
+    let data_sources = writable.load_dynamic_data_sources(vec![]).await.unwrap();
+    assert!(data_sources.len() == 2);
+
+    let mut causality_region = CausalityRegion::ONCHAIN;
+    for data_source in data_sources {
+        assert!(data_source.done_at.is_some());
+        assert!(data_source.causality_region == causality_region.next());
+        causality_region = causality_region.next();
+    }
+
+    let stop_block = test_ptr(5);
+    let err = ctx.start_and_sync_to_error(stop_block.clone()).await;
+    let message = "entity type `IpfsFile1` is not on the 'entities' list for data source `File2`. \
+                   Hint: Add `IpfsFile1` to the 'entities' list, which currently is: `IpfsFile`.\twasm backtrace:\t 0: 0x365d - <unknown>!src/mapping/handleFile1\t in handler `handleFile1` at block #5 ()".to_string();
+    let expected_err = SubgraphError {
+        subgraph_id: ctx.deployment.hash.clone(),
+        message,
+        block_ptr: Some(stop_block),
+        handler: None,
+        deterministic: false,
+    };
+    assert_eq!(err, expected_err);
+
+    // Unfail the subgraph to test a conflict between an onchain and offchain entity
+    {
+        ctx.rewind(test_ptr(4));
+
+        // Replace block number 5 with one that contains a different event
+        let mut blocks = blocks.clone();
+        blocks.pop();
+        let block_5_1_ptr = test_ptr_reorged(5, 1);
+        let mut block_5_1 = empty_block(test_ptr(4), block_5_1_ptr.clone());
+        push_test_log(&mut block_5_1, "saveConflictingEntity");
+        blocks.push(block_5_1);
+
+        chain.set_block_stream(blocks);
+
+        // Errors in the store pipeline can be observed by using the runner directly.
+        let runner = ctx.runner(block_5_1_ptr.clone()).await;
+        let err = runner
+            .run()
+            .await
+            .err()
+            .unwrap_or_else(|| panic!("subgraph ran successfully but an error was expected"));
+
+        let message =
+            "store error: conflicting key value violates exclusion constraint \"ipfs_file_id_block_range_excl\""
+                .to_string();
+        assert_eq!(err.to_string(), message);
+    }
+
+    // Unfail the subgraph to test a conflict between two offchain entities
+    {
+        // Replace block number 5 with one that contains a different event
+        let mut blocks = blocks.clone();
+        blocks.pop();
+        let block_5_2_ptr = test_ptr_reorged(5, 2);
+        let mut block_5_2 = empty_block(test_ptr(4), block_5_2_ptr.clone());
+        push_test_log(&mut block_5_2, "createFile1");
+        blocks.push(block_5_2);
+
+        chain.set_block_stream(blocks);
+
+        // Errors in the store pipeline can be observed by using the runner directly.
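+        // `createFile1` spawns the `File1` data source again, so its `IpfsFile1` entity is
+        // written a second time with the same id, which trips the exclusion constraint below.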
+        let err = ctx
+            .runner(block_5_2_ptr.clone())
+            .await
+            .run()
+            .await
+            .err()
+            .unwrap_or_else(|| panic!("subgraph ran successfully but an error was expected"));
+
+        let message =
+            "store error: conflicting key value violates exclusion constraint \"ipfs_file_1_id_block_range_excl\""
+                .to_string();
+        assert_eq!(err.to_string(), message);
+    }
+}
+
+#[tokio::test]
+async fn template_static_filters_false_positives() {
+    let RunnerTestRecipe {
+        stores,
+        subgraph_name,
+        hash,
+    } = RunnerTestRecipe::new("dynamic-data-source").await;
+
+    let blocks = {
+        let block_0 = genesis();
+        let block_1 = empty_block(block_0.ptr(), test_ptr(1));
+        let block_2 = empty_block(block_1.ptr(), test_ptr(2));
+        vec![block_0, block_1, block_2]
+    };
+    let stop_block = test_ptr(1);
+    let chain = chain(blocks, &stores, None).await;
+
+    let mut env_vars = EnvVars::default();
+    env_vars.experimental_static_filters = true;
+
+    let ctx = fixture::setup(
+        subgraph_name.clone(),
+        &hash,
+        &stores,
+        &chain,
+        None,
+        Some(env_vars),
+    )
+    .await;
+    ctx.start_and_sync_to(stop_block).await;
+
+    let poi = ctx
+        .store
+        .get_proof_of_indexing(&ctx.deployment.hash, &None, test_ptr(1))
+        .await
+        .unwrap();
+
+    // This check exists to prevent regression of https://github.com/graphprotocol/graph-node/issues/3963:
+    // when false positives go through the block stream, they should be discarded by
+    // `DataSource::match_and_decode`. The POI below is generated consistently from the empty
+    // POI table. If this fails, it is likely that either the bug was re-introduced, the POI
+    // infrastructure changed, or the subgraph id changed.
+    assert_eq!(
+        poi.unwrap(),
+        [
+            172, 174, 50, 50, 108, 187, 89, 216, 16, 123, 40, 207, 250, 97, 247, 138, 180, 67, 20,
+            5, 114, 187, 237, 104, 187, 122, 220, 9, 131, 67, 50, 237
+        ],
+    );
+}
+
+#[tokio::test]
+async fn retry_create_ds() {
+    let RunnerTestRecipe {
+        stores,
+        subgraph_name,
+        hash,
+    } = RunnerTestRecipe::new("data-source-revert2").await;
+
+    let blocks = {
+        let block0 = genesis();
+        let block1 = empty_block(block0.ptr(), test_ptr(1));
+        let block1_reorged_ptr = BlockPtr {
+            number: 1,
+            hash: H256::from_low_u64_be(12).into(),
+        };
+        let block1_reorged = empty_block(block0.ptr(), block1_reorged_ptr.clone());
+        let block2 = empty_block(block1_reorged.ptr(), test_ptr(2));
+        vec![block0, block1, block1_reorged, block2]
+    };
+    let stop_block = blocks.last().unwrap().block.ptr();
+
+    let called = AtomicBool::new(false);
+    let triggers_in_block = Arc::new(
+        move |block: <graph_chain_ethereum::Chain as Blockchain>::Block| {
+            let logger = Logger::root(Discard, o!());
+            // Commenting out this injected error makes the test pass trivially.
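+            // Fail only the first time a non-genesis block is processed; the runner is
+            // expected to retry the block and succeed on the second attempt.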
+            if block.number() > 0 && !called.load(atomic::Ordering::SeqCst) {
+                called.store(true, atomic::Ordering::SeqCst);
+                return Err(anyhow::anyhow!("This error happens once"));
+            }
+            Ok(BlockWithTriggers::new(block, Vec::new(), &logger))
+        },
+    );
+    let triggers_adapter = Arc::new(MockAdapterSelector {
+        x: PhantomData,
+        triggers_in_block_sleep: Duration::ZERO,
+        triggers_in_block,
+    });
+    let chain = chain(blocks, &stores, Some(triggers_adapter)).await;
+
+    let mut env_vars = EnvVars::default();
+    env_vars.subgraph_error_retry_ceil = Duration::from_secs(1);
+
+    let ctx = fixture::setup(
+        subgraph_name.clone(),
+        &hash,
+        &stores,
+        &chain,
+        None,
+        Some(env_vars),
+    )
+    .await;
+
+    let runner = ctx
+        .runner(stop_block)
+        .await
+        .run_for_test(true)
+        .await
+        .unwrap();
+    assert_eq!(runner.context().instance().hosts().len(), 2);
+}
+
+#[tokio::test]
+async fn fatal_error() -> anyhow::Result<()> {
+    let RunnerTestRecipe {
+        stores,
+        subgraph_name,
+        hash,
+    } = RunnerTestRecipe::new("fatal-error").await;
+
+    let blocks = {
+        let block_0 = genesis();
+        let block_1 = empty_block(block_0.ptr(), test_ptr(1));
+        let block_2 = empty_block(block_1.ptr(), test_ptr(2));
+        let block_3 = empty_block(block_2.ptr(), test_ptr(3));
+        vec![block_0, block_1, block_2, block_3]
+    };
+
+    let stop_block = blocks.last().unwrap().block.ptr();
+
+    let chain = chain(blocks, &stores, None).await;
+    let ctx = fixture::setup(subgraph_name.clone(), &hash, &stores, &chain, None, None).await;
+
+    ctx.start_and_sync_to_error(stop_block).await;
+
+    // Go through the indexing status API to also test it.
+    let status = ctx.indexing_status().await;
+    assert!(status.health == SubgraphHealth::Failed);
+    assert!(status.entity_count == 1.into()); // Only PoI
+    let err = status.fatal_error.unwrap();
+    assert!(err.block.number == 3.into());
+    assert!(err.deterministic);
+
+    // Test that rewind unfails the subgraph.
+    ctx.rewind(test_ptr(1));
+    let status = ctx.indexing_status().await;
+    assert!(status.health == SubgraphHealth::Healthy);
+    assert!(status.fatal_error.is_none());
+
+    Ok(())
+}
+
+async fn build_subgraph(dir: &str) -> DeploymentHash {
+    build_subgraph_with_yarn_cmd(dir, "deploy:test").await
+}
+
+async fn build_subgraph_with_yarn_cmd(dir: &str, yarn_cmd: &str) -> DeploymentHash {
+    // Test that IPFS is up.
+    IpfsClient::localhost()
+        .test()
+        .await
+        .expect("Could not connect to IPFS, make sure it's running at port 5001");
+
+    // Make sure dependencies are present.
+
+    run_cmd(
+        Command::new("yarn")
+            .arg("install")
+            .arg("--mutex")
+            .arg("file:.yarn-mutex")
+            .current_dir("./runner-tests/"),
+    );
+
+    // Run codegen.
+    run_cmd(Command::new("yarn").arg("codegen").current_dir(&dir));
+
+    // Run `deploy` for its side effect of uploading to IPFS; the graph-node URL is fake, and
+    // the actual deploy call is meant to fail.
+    let deploy_output = run_cmd(
+        Command::new("yarn")
+            .arg(yarn_cmd)
+            .env("IPFS_URI", "http://127.0.0.1:5001")
+            .env("GRAPH_NODE_ADMIN_URI", "http://localhost:0")
+            .current_dir(dir),
+    );
+
+    // Hack to extract the deployment id from the `graph deploy` output.
+    const ID_PREFIX: &str = "Build completed: ";
+    let mut line = deploy_output
+        .lines()
+        .find(|line| line.contains(ID_PREFIX))
+        .expect("found no matching line");
+    if !line.starts_with(ID_PREFIX) {
+        line = &line[5..line.len() - 5]; // workaround for colored output
+    }
+    DeploymentHash::new(line.trim_start_matches(ID_PREFIX)).unwrap()
+}